/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.26 2005/03/01 23:35:14 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
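
/*
 * Illustrative userland sketch of the two modes described above, kept
 * under "#if 0" because it is not part of the kernel build.  The small
 * write goes through the kernel buffer; the large write is a candidate
 * for the direct (page-mapped) path.  The 256KB figure is an arbitrary
 * example size, not a kernel constant.
 */
#if 0
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char small[64];			/* small write: buffered path */
	size_t bigsize = 256 * 1024;	/* large write: direct candidate */
	char *big;

	if (pipe(fds) < 0) {
		perror("pipe");
		return (1);
	}
	memset(small, 'x', sizeof(small));
	if ((big = malloc(bigsize)) == NULL)
		return (1);
	memset(big, 'y', bigsize);

	switch (fork()) {
	case -1:
		return (1);
	case 0:
		/* child: the kernel picks buffered vs direct per write */
		close(fds[0]);
		write(fds[1], small, sizeof(small));
		write(fds[1], big, bigsize);
		_exit(0);
	default: {
		/* parent: drain the pipe until the child closes it */
		char buf[8192];
		size_t total = 0;
		ssize_t n;

		close(fds[1]);
		while ((n = read(fds[0], buf, sizeof(buf))) > 0)
			total += (size_t)n;
		printf("read %zu bytes\n", total);
		wait(NULL);
		break;
	}
	}
	return (0);
}
#endif
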
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int pipe_close (struct file *fp, struct thread *td);
static int pipe_poll (struct file *fp, int events, struct ucred *cred,
		struct thread *td);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct thread *td);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct thread *td);

static struct fileops pipeops = {
	NULL,	/* port */
	NULL,	/* clone */
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_dwrite_enable = 1;	/* 0:copy, 1:kmem/sfbuf 2:force */
static int pipe_dwrite_sfbuf = 1;	/* 0:kmem_map 1:sfbufs 2:sfbufs_dmap */
					/* 3:sfbuf_dmap w/ forced invlpg */

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_enable,
	CTLFLAG_RW, &pipe_dwrite_enable, 0, "1:enable/2:force direct writes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_sfbuf,
	CTLFLAG_RW, &pipe_dwrite_sfbuf, 0,
	"(if dwrite_enable) 0:kmem 1:sfbuf 2:sfbuf_dmap 3:sfbuf_dmap_forceinvlpg");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
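
/*
 * Userland sketch (not part of this file): reading the counters
 * published above, assuming the standard sysctlbyname(3) interface.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int nbig, maxbig;
	size_t len;

	len = sizeof(nbig);
	if (sysctlbyname("kern.pipe.nbig", &nbig, &len, NULL, 0) < 0)
		return (1);
	len = sizeof(maxbig);
	if (sysctlbyname("kern.pipe.maxbig", &maxbig, &len, NULL, 0) < 0)
		return (1);
	printf("big pipes in use: %d (limit %d)\n", nbig, maxbig);
	return (0);
}
#endif
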
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline int pipelock (struct pipe *cpipe, int catch);
static __inline void pipeunlock (struct pipe *cpipe);
static __inline void pipeselwakeup (struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer (struct pipe *wpipe, struct uio *uio);
static int pipe_direct_write (struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer (struct pipe *wpipe);
#endif
static int pipespace (struct pipe *cpipe, int size);

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);
	fdp = p->p_fd;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	/*
	 * Select the direct-map features to use for this pipe.  Since the
	 * sysctl's can change on the fly we record the settings when the
	 * pipe is created.
	 *
	 * Generally speaking the system will default to what we consider
	 * to be the best-balanced and most stable option.  Right now this
	 * is SFBUF1.  Modes 2 and 3 are considered experimental at the
	 * moment.
	 */
	wpipe->pipe_feature = PIPE_COPY;
	if (pipe_dwrite_enable) {
		switch(pipe_dwrite_sfbuf) {
		case 0:
			wpipe->pipe_feature = PIPE_KMEM;
			break;
		case 1:
			wpipe->pipe_feature = PIPE_SFBUF1;
			break;
		case 2:
		case 3:
			wpipe->pipe_feature = PIPE_SFBUF2;
			break;
		}
	}
	rpipe->pipe_feature = wpipe->pipe_feature;

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd2);
	if (error) {
		if (fdp->fd_ofiles[fd1] == rf) {
			fdp->fd_ofiles[fd1] = NULL;
			fdrop(rf, td);
		}
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	fdrop(rf, td);
	fdrop(wf, td);

	return (0);
}
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it retains the old buffer and returns ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t) vm_map_min(kernel_map);

		error = vm_map_find(kernel_map, object, 0,
			(vm_offset_t *) &buffer, size, 1,
			VM_PROT_ALL, VM_PROT_ALL, 0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
	} else {
		cpipe = malloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	return (0);
}
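
/*
 * Note on the cache above: a cached struct pipe reuses its pipe_peer
 * field as the per-cpu freelist link (a cached pipe has no peer), and
 * pipeclose() only caches pipes whose buffer is the default PIPE_SIZE.
 * pipespace() then finds a correctly-sized VM object already attached
 * and bumps pipe_bcache_alloc instead of going back to the kernel_map.
 */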

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, (catch ? PCATCH : 0), "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}
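
/*
 * Locking notes: PIPE_LOCK is a simple sleep lock serializing I/O on
 * the pipe buffer.  Contenders set PIPE_LWANT and tsleep() on the pipe
 * address; pipeunlock() issues a wakeup() only if a waiter registered.
 * pipeselwakeup() fans a state change out to all three notification
 * mechanisms: select/poll, SIGIO (O_ASYNC), and kqueue.
 */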

/* ARGSUSED */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		caddr_t va;

		if (rpipe->pipe_buffer.cnt > 0) {
			/*
			 * normal pipe buffer receive
			 */
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		} else if (rpipe->pipe_kva &&
			   rpipe->pipe_feature == PIPE_KMEM &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy using source-side kva mapping
			 */
			size = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
			if (size > (u_int)uio->uio_resid)
				size = (u_int)uio->uio_resid;
			va = (caddr_t)rpipe->pipe_kva +
				xio_kvaoffset(&rpipe->pipe_map, rpipe->pipe_buffer.out);
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
		} else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
			   rpipe->pipe_kva &&
			   rpipe->pipe_feature == PIPE_SFBUF2 &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy, bypassing a kernel buffer.  We cannot
			 * mess with the direct-write buffer until
			 * PIPE_DIRECTIP is cleared.  In order to prevent
			 * the pipe_write code from racing itself in
			 * direct_write, we set DIRECTIP when we clear
			 * DIRECTW after we have exhausted the buffer.
			 */
			if (pipe_dwrite_sfbuf == 3)
				rpipe->pipe_kvamask = 0;
			pmap_qenter2(rpipe->pipe_kva, rpipe->pipe_map.xio_pages,
				    rpipe->pipe_map.xio_npages,
				    &rpipe->pipe_kvamask);
			size = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
			if (size > (u_int)uio->uio_resid)
				size = (u_int)uio->uio_resid;
			va = (caddr_t)rpipe->pipe_kva +
				xio_kvaoffset(&rpipe->pipe_map, rpipe->pipe_buffer.out);
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
		} else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
			   rpipe->pipe_feature == PIPE_SFBUF1 &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy, bypassing a kernel buffer.  We cannot
			 * mess with the direct-write buffer until
			 * PIPE_DIRECTIP is cleared.  In order to prevent
			 * the pipe_write code from racing itself in
			 * direct_write, we set DIRECTIP when we clear
			 * DIRECTW after we have exhausted the buffer.
			 */
			error = xio_uio_copy(&rpipe->pipe_map, rpipe->pipe_buffer.out, uio, &size);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining
			 * processing.  We will either break out with an
			 * error or we will sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PCATCH|PNORESCHED,
				    "piperd", 0)) == 0) {
					error = pipelock(rpipe, 1);
				}
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	return (error);
}
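
/*
 * Standalone sketch (not part of this file) of the in/out/cnt
 * circular-buffer arithmetic used by pipe_read() above and
 * pipe_write() below.  'struct ring' and the helpers are illustrative
 * names, not kernel interfaces.
 */
#if 0
#include <string.h>

struct ring {
	char		*buffer;
	unsigned int	size;		/* total capacity */
	unsigned int	in;		/* write index */
	unsigned int	out;		/* read index */
	unsigned int	cnt;		/* bytes currently stored */
};

static unsigned int
ring_write(struct ring *r, const char *data, unsigned int len)
{
	unsigned int seg;

	if (len > r->size - r->cnt)
		len = r->size - r->cnt;
	/* first segment: from 'in' to the physical end of the buffer */
	seg = r->size - r->in;
	if (seg > len)
		seg = len;
	memcpy(r->buffer + r->in, data, seg);
	/* wrapped remainder, if any, continues at the start */
	memcpy(r->buffer, data + seg, len - seg);
	r->in = (r->in + len) % r->size;
	r->cnt += len;
	return (len);
}

static unsigned int
ring_read(struct ring *r, char *data, unsigned int len)
{
	unsigned int seg;

	if (len > r->cnt)
		len = r->cnt;
	seg = r->size - r->out;
	if (seg > len)
		seg = len;
	memcpy(data, r->buffer + r->out, seg);
	memcpy(data + seg, r->buffer, len - seg);
	r->out = (r->out + len) % r->size;
	r->cnt -= len;
	/* drained: reset indices for locality, as pipe_read() does */
	if (r->cnt == 0)
		r->in = r->out = 0;
	return (len);
}
#endif
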
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
	int error;
	u_int size;

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	error = xio_init_ubuf(&wpipe->pipe_map, uio->uio_iov->iov_base,
				size, XIOF_READ);
	wpipe->pipe_buffer.out = 0;
	if (error)
		return(error);

	/*
	 * Create a kernel map for KMEM and SFBUF2 copy modes.  SFBUF2 will
	 * map the pages on the target while KMEM maps the pages now.
	 */
	switch(wpipe->pipe_feature) {
	case PIPE_KMEM:
	case PIPE_SFBUF2:
		if (wpipe->pipe_kva == NULL) {
			wpipe->pipe_kva =
			    kmem_alloc_nofault(kernel_map, XIO_INTERNAL_SIZE);
			wpipe->pipe_kvamask = 0;
		}
		if (wpipe->pipe_feature == PIPE_KMEM) {
			pmap_qenter(wpipe->pipe_kva, wpipe->pipe_map.xio_pages,
				    wpipe->pipe_map.xio_npages);
		}
		break;
	default:
		break;
	}

	/*
	 * And update the uio data.  The XIO might have loaded fewer bytes
	 * than requested so reload 'size'.
	 */
	size = wpipe->pipe_map.xio_bytes;
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * Note that in direct mode pipe_buffer.out is used to track the
 * XIO offset.  We are converting the direct mode into buffered mode
 * which changes the meaning of pipe_buffer.out.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
	int size;
	int offset;

	offset = wpipe->pipe_buffer.out;
	size = wpipe->pipe_map.xio_bytes - offset;

	KKASSERT(size <= wpipe->pipe_buffer.size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);

	xio_copy_xtok(&wpipe->pipe_map, offset, wpipe->pipe_buffer.buffer, size);
	xio_release(&wpipe->pipe_map);
	if (wpipe->pipe_kva) {
		pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES);
		kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE);
		wpipe->pipe_kva = NULL;
	}
}
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
	int error;

retry:
	while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdww", 0);
		if (error)
			goto error2;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error2;
		}
	}
	KKASSERT(wpipe->pipe_map.xio_bytes == 0);
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdwc", 0);
		if (error)
			goto error2;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error2;
		}
		goto retry;
	}

	/*
	 * Build our direct-write buffer
	 */
	wpipe->pipe_state |= PIPE_DIRECTW | PIPE_DIRECTIP;
	error = pipe_build_write_buffer(wpipe, uio);
	if (error)
		goto error1;
	wpipe->pipe_state &= ~PIPE_DIRECTIP;

	/*
	 * Wait until the receiver has snarfed the data.  Since we are likely
	 * going to sleep we optimize the case and yield synchronously,
	 * possibly avoiding the tsleep().
	 */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			xio_release(&wpipe->pipe_map);
			if (wpipe->pipe_kva) {
				pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES);
				kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE);
				wpipe->pipe_kva = NULL;
			}
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PCATCH|PNORESCHED, "pipdwt", 0);
	}
	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
		KKASSERT((wpipe->pipe_state & PIPE_DIRECTIP) == 0);
	} else {
		/*
		 * note: The pipe_kva mapping is not qremove'd here.  For
		 * legacy PIPE_KMEM mode this constitutes an improvement
		 * over the original FreeBSD-4 algorithm.  For PIPE_SFBUF2
		 * mode the kva mapping must not be removed to get the
		 * caching benefit.
		 *
		 * For testing purposes we will give the original algorithm
		 * the benefit of the doubt 'what it could have been', and
		 * keep the optimization.
		 */
		KKASSERT(wpipe->pipe_state & PIPE_DIRECTIP);
		xio_release(&wpipe->pipe_map);
		wpipe->pipe_state &= ~PIPE_DIRECTIP;
	}
	pipeunlock(wpipe);
	return (error);

	/*
	 * Direct-write error, clear the direct write flags.
	 */
error1:
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);
	/* fallthrough */

	/*
	 * General error, wakeup the other side if it happens to be sleeping.
	 */
error2:
	wakeup(wpipe);
	return (error);
}
#endif
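
/*
 * Summary of the direct-write flag protocol implemented above:
 *
 *	(neither)		no direct write pending
 *	DIRECTW|DIRECTIP	writer is still building the XIO;
 *				readers must wait
 *	DIRECTW			buffer ready; the reader may consume it
 *	DIRECTIP		reader exhausted the buffer; the writer
 *				must release the XIO and clear the flag
 *
 * DIRECTIP is what keeps pipe_write() and the reader from touching the
 * XIO while the other side is still in the middle of the hand-off.
 */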

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				pipe_nbig++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return(error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT ||
		    pipe_dwrite_enable > 1) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    pipe_dwrite_enable) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also,
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now
			 * and yield to let it drain synchronously rather
			 * than block.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH|PNORESCHED, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
1071 */ 1072 if (wpipe->pipe_state & PIPE_EOF) { 1073 error = EPIPE; 1074 break; 1075 } 1076 } 1077 } 1078 1079 --wpipe->pipe_busy; 1080 1081 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) { 1082 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); 1083 wakeup(wpipe); 1084 } else if (wpipe->pipe_buffer.cnt > 0) { 1085 /* 1086 * If we have put any characters in the buffer, we wake up 1087 * the reader. 1088 */ 1089 if (wpipe->pipe_state & PIPE_WANTR) { 1090 wpipe->pipe_state &= ~PIPE_WANTR; 1091 wakeup(wpipe); 1092 } 1093 } 1094 1095 /* 1096 * Don't return EPIPE if I/O was successful 1097 */ 1098 if ((wpipe->pipe_buffer.cnt == 0) && 1099 (uio->uio_resid == 0) && 1100 (error == EPIPE)) { 1101 error = 0; 1102 } 1103 1104 if (error == 0) 1105 vfs_timestamp(&wpipe->pipe_mtime); 1106 1107 /* 1108 * We have something to offer, 1109 * wake up select/poll. 1110 */ 1111 if (wpipe->pipe_buffer.cnt) 1112 pipeselwakeup(wpipe); 1113 1114 return (error); 1115 } 1116 1117 /* 1118 * we implement a very minimal set of ioctls for compatibility with sockets. 1119 */ 1120 int 1121 pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td) 1122 { 1123 struct pipe *mpipe = (struct pipe *)fp->f_data; 1124 1125 switch (cmd) { 1126 1127 case FIONBIO: 1128 return (0); 1129 1130 case FIOASYNC: 1131 if (*(int *)data) { 1132 mpipe->pipe_state |= PIPE_ASYNC; 1133 } else { 1134 mpipe->pipe_state &= ~PIPE_ASYNC; 1135 } 1136 return (0); 1137 1138 case FIONREAD: 1139 if (mpipe->pipe_state & PIPE_DIRECTW) { 1140 *(int *)data = mpipe->pipe_map.xio_bytes - 1141 mpipe->pipe_buffer.out; 1142 } else { 1143 *(int *)data = mpipe->pipe_buffer.cnt; 1144 } 1145 return (0); 1146 1147 case FIOSETOWN: 1148 return (fsetown(*(int *)data, &mpipe->pipe_sigio)); 1149 1150 case FIOGETOWN: 1151 *(int *)data = fgetown(mpipe->pipe_sigio); 1152 return (0); 1153 1154 /* This is deprecated, FIOSETOWN should be used instead. */ 1155 case TIOCSPGRP: 1156 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio)); 1157 1158 /* This is deprecated, FIOGETOWN should be used instead. 
/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td)
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW) {
			*(int *)data = mpipe->pipe_map.xio_bytes -
					mpipe->pipe_buffer.out;
		} else {
			*(int *)data = mpipe->pipe_buffer.cnt;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);

	}
	return (ENOTTY);
}
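
/*
 * Userland sketch (not part of this file): FIONREAD reports the bytes
 * immediately readable, which for a pending direct write is the unread
 * remainder of the XIO.  show_pending() is a hypothetical helper.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <stdio.h>

static void
show_pending(int fd)
{
	int n;

	if (ioctl(fd, FIONREAD, &n) == 0)
		printf("%d bytes pending in pipe\n", n);
}
#endif
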
int
pipe_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}

static int
pipe_stat(struct file *fp, struct stat *ub, struct thread *td)
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	if (ub->st_size == 0 && (pipe->pipe_state & PIPE_DIRECTW)) {
		ub->st_size = pipe->pipe_map.xio_bytes -
				pipe->pipe_buffer.out;
	}
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(struct file *fp, struct thread *td)
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--pipe_nbig;
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
#ifndef PIPE_NODIRECT
	KKASSERT(cpipe->pipe_map.xio_bytes == 0 &&
		cpipe->pipe_map.xio_offset == 0 &&
		cpipe->pipe_map.xio_npages == 0);
#endif
}

/*
 * shut down the pipe
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		tsleep(cpipe, 0, "pipecl", 0);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}

	if (cpipe->pipe_kva) {
		pmap_qremove(cpipe->pipe_kva, XIO_INTERNAL_PAGES);
		kmem_free(kernel_map, cpipe->pipe_kva, XIO_INTERNAL_SIZE);
		cpipe->pipe_kva = NULL;
	}

	/*
	 * free or cache resources
	 */
	gd = mycpu;
	if (gd->gd_pipeqcount >= pipe_maxcache ||
	    cpipe->pipe_buffer.size != PIPE_SIZE
	) {
		pipe_free_kmem(cpipe);
		free(cpipe, M_PIPE);
	} else {
		KKASSERT(cpipe->pipe_map.xio_npages == 0 &&
			cpipe->pipe_map.xio_bytes == 0 &&
			cpipe->pipe_map.xio_offset == 0);
		cpipe->pipe_state = 0;
		cpipe->pipe_busy = 0;
		cpipe->pipe_peer = gd->gd_pipeq;
		gd->gd_pipeq = cpipe;
		++gd->gd_pipeqcount;
	}
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EPIPE);
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) {
		kn->kn_data = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
	}

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}
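
/*
 * Userland sketch (not part of this file): the filters above back
 * EVFILT_READ/EVFILT_WRITE for pipes, so kn_data surfaces as kev.data
 * (byte count on the read side, free space on the write side) and a
 * vanished peer shows up as EV_EOF.  wait_readable() is a hypothetical
 * helper; 'kq' is assumed to come from kqueue().
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>

static void
wait_readable(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		return;
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		printf("%ld bytes readable%s\n", (long)kev.data,
		    (kev.flags & EV_EOF) ? " (writer gone)" : "");
	}
}
#endif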