/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
                struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
        .fo_read = pipe_read,
        .fo_write = pipe_write,
        .fo_ioctl = pipe_ioctl,
        .fo_poll = pipe_poll,
        .fo_kqfilter = pipe_kqfilter,
        .fo_stat = pipe_stat,
        .fo_close = pipe_close,
        .fo_shutdown = pipe_shutdown
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
        { 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
        { 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES   64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe blocked on read");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe blocked on write");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;   /* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
        if (cpipe->pipe_state & PIPE_SEL) {
                get_mplock();
                cpipe->pipe_state &= ~PIPE_SEL;
                selwakeup(&cpipe->pipe_sel);
                rel_mplock();
        }
        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
                get_mplock();
                pgsigio(cpipe->pipe_sigio, SIGIO, 0);
                rel_mplock();
        }
        if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
                get_mplock();
                KNOTE(&cpipe->pipe_sel.si_note, 0);
                rel_mplock();
        }
}

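/*
 * The *ipp in-progress indicator used by pipe_start_uio() and
 * pipe_end_uio() below is a small tri-state latch:
 *
 *      *ipp ==  0      no uio in progress
 *      *ipp ==  1      a uio is in progress, no waiters
 *      *ipp == -1      a uio is in progress and at least one other
 *                      thread is sleeping on ipp waiting for it to end
 */
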
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
        int error;

        while (*ipp) {
                *ipp = -1;
                error = tsleep(ipp, PCATCH, "pipexx", 0);
                if (error)
                        return (error);
        }
        *ipp = 1;
        return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
        if (*ipp < 0) {
                *ipp = 0;
                wakeup(ipp);
        } else {
                KKASSERT(*ipp > 0);
                *ipp = 0;
        }
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
        if (pipe_mpsafe == 0) {
                get_mplock();
                *save = 1;
        } else
#endif
        {
                *save = 0;
        }
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
        if (*save)
                rel_mplock();
#endif
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct file *rf, *wf;
        struct pipe *rpipe, *wpipe;
        int fd1, fd2, error;

        KKASSERT(p);

        rpipe = wpipe = NULL;
        if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (ENFILE);
        }

        error = falloc(p, &rf, &fd1);
        if (error) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (error);
        }
        uap->sysmsg_fds[0] = fd1;

        /*
         * Warning: once we've gotten past allocation of the fd for the
         * read-side, we can only drop the read side via fdrop() in order
         * to avoid races against processes which manage to dup() the read
         * side while we are blocked trying to allocate the write side.
         */
        rf->f_type = DTYPE_PIPE;
        rf->f_flag = FREAD | FWRITE;
        rf->f_ops = &pipeops;
        rf->f_data = rpipe;
        error = falloc(p, &wf, &fd2);
        if (error) {
                fsetfd(p, NULL, fd1);
                fdrop(rf);
                /* rpipe has been closed by fdrop(). */
                pipeclose(wpipe);
                return (error);
        }
        wf->f_type = DTYPE_PIPE;
        wf->f_flag = FREAD | FWRITE;
        wf->f_ops = &pipeops;
        wf->f_data = wpipe;
        uap->sysmsg_fds[1] = fd2;

        rpipe->pipe_slock = kmalloc(sizeof(struct lock),
                                    M_PIPE, M_WAITOK|M_ZERO);
        wpipe->pipe_slock = rpipe->pipe_slock;
        rpipe->pipe_peer = wpipe;
        wpipe->pipe_peer = rpipe;
        lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

        /*
         * Once activated the peer relationship remains valid until
         * both sides are closed.
         */
        fsetfd(p, rf, fd1);
        fsetfd(p, wf, fd2);
        fdrop(rf);
        fdrop(wf);

        return (0);
}

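/*
 * Userland view of the above, for reference:
 *
 *      int fds[2];
 *
 *      if (pipe(fds) < 0)
 *              err(1, "pipe");
 *      write(fds[1], "x", 1);          - fds[1] is the write side
 *      read(fds[0], &c, 1);            - fds[0] is the read side
 */
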
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
        struct vm_object *object;
        caddr_t buffer;
        int npages, error;

        npages = round_page(size) / PAGE_SIZE;
        object = cpipe->pipe_buffer.object;

        /*
         * [re]create the object if necessary and reserve space for it
         * in the kernel_map.  The object and memory are pageable.  On
         * success, free the old resources before assigning the new
         * ones.
         */
        if (object == NULL || object->size != npages) {
                get_mplock();
                object = vm_object_allocate(OBJT_DEFAULT, npages);
                buffer = (caddr_t)vm_map_min(&kernel_map);

                error = vm_map_find(&kernel_map, object, 0,
                                    (vm_offset_t *)&buffer, size,
                                    1,
                                    VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);

                if (error != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        rel_mplock();
                        return (ENOMEM);
                }
                pipe_free_kmem(cpipe);
                rel_mplock();
                cpipe->pipe_buffer.object = object;
                cpipe->pipe_buffer.buffer = buffer;
                cpipe->pipe_buffer.size = size;
                ++pipe_bkmem_alloc;
        } else {
                ++pipe_bcache_alloc;
        }
        cpipe->pipe_buffer.rindex = 0;
        cpipe->pipe_buffer.windex = 0;
        return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
        globaldata_t gd = mycpu;
        struct pipe *cpipe;
        int error;

        if ((cpipe = gd->gd_pipeq) != NULL) {
                gd->gd_pipeq = cpipe->pipe_peer;
                --gd->gd_pipeqcount;
                cpipe->pipe_peer = NULL;
                cpipe->pipe_wantwcnt = 0;
        } else {
                cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
        }
        *cpipep = cpipe;
        if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
                return (error);
        vfs_timestamp(&cpipe->pipe_ctime);
        cpipe->pipe_atime = cpipe->pipe_ctime;
        cpipe->pipe_mtime = cpipe->pipe_ctime;
        lwkt_token_init(&cpipe->pipe_rlock);
        lwkt_token_init(&cpipe->pipe_wlock);
        return (0);
}

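/*
 * A note on the FIFO arithmetic used below: rindex and windex are
 * free-running unsigned counters; only their difference and their
 * value masked by (size - 1) are meaningful (size is a power of 2).
 * For example, with size = 16384, windex = 16390, rindex = 16380:
 *
 *      bytes available   = windex - rindex        = 10
 *      read offset       = rindex & (size - 1)    = 16380
 *      contiguous bytes  = size - read offset     = 4
 *
 * so a 10 byte read is satisfied by a 4 byte copy from the tail of
 * the buffer followed by a 6 byte copy from the beginning.
 */
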
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        struct pipe *rpipe;
        int error;
        size_t nread = 0;
        int nbio;
        u_int size;     /* total bytes available */
        u_int nsize;    /* total bytes to read */
        u_int rindex;   /* contiguous bytes available */
        int notify_writer;
        lwkt_tokref rlock;
        lwkt_tokref wlock;
        int mpsave;
        int bigread;
        int bigcount;

        if (uio->uio_resid == 0)
                return(0);

        /*
         * Setup locks, calculate nbio
         */
        pipe_get_mplock(&mpsave);
        rpipe = (struct pipe *)fp->f_data;
        lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        /*
         * Reads are serialized.  Note, however, that pipe_buffer.buffer and
         * pipe_buffer.size can change out from under us when the number
         * of bytes in the buffer is zero due to the write-side doing a
         * pipespace().
         */
        error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
        if (error) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&rlock);
                return (error);
        }
        notify_writer = 0;

        bigread = (uio->uio_resid > 10 * 1024 * 1024);
        bigcount = 10;

        while (uio->uio_resid) {
                /*
                 * Don't hog the cpu.
                 */
                if (bigread && --bigcount == 0) {
                        lwkt_user_yield();
                        bigcount = 10;
                        if (CURSIG(curthread->td_lwp)) {
                                error = EINTR;
                                break;
                        }
                }

                size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
                cpu_lfence();
                if (size) {
                        rindex = rpipe->pipe_buffer.rindex &
                                 (rpipe->pipe_buffer.size - 1);
                        nsize = size;
                        if (nsize > rpipe->pipe_buffer.size - rindex)
                                nsize = rpipe->pipe_buffer.size - rindex;
                        nsize = szmin(nsize, uio->uio_resid);

                        error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
                                        nsize, uio);
                        if (error)
                                break;
                        cpu_mfence();
                        rpipe->pipe_buffer.rindex += nsize;
                        nread += nsize;

                        /*
                         * If the FIFO is still over half full just continue
                         * and do not try to notify the writer yet.
                         */
                        if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
                                notify_writer = 0;
                                continue;
                        }

                        /*
                         * When the FIFO is less than half full notify any
                         * waiting writer.  WANTW can be checked while
                         * holding just the rlock.
                         */
                        notify_writer = 1;
                        if ((rpipe->pipe_state & PIPE_WANTW) == 0)
                                continue;
                }

                /*
                 * If the "write-side" was blocked we wake it up.  This code
                 * is reached either when the buffer is completely emptied
                 * or if it becomes more than half-empty.
                 *
                 * Pipe_state can only be modified if both the rlock and
                 * wlock are held.
                 */
                if (rpipe->pipe_state & PIPE_WANTW) {
                        lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                notify_writer = 0;
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                lwkt_reltoken(&wlock);
                                wakeup(rpipe);
                        } else {
                                lwkt_reltoken(&wlock);
                        }
                }

                /*
                 * Pick up our copy loop again if the writer sent data to
                 * us while we were messing around.
                 *
                 * On an SMP box poll up to pipe_delay nanoseconds for new
                 * data.  Typically a value of 2000 to 4000 is sufficient
                 * to eradicate most IPIs/tsleeps/wakeups when a pipe
                 * is used for synchronous communications with small packets,
                 * and 8000 or so (8uS) will pipeline large buffer xfers
                 * between cpus over a pipe.
                 *
                 * For synchronous communications a hit means doing a
                 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
                 * whereas a miss requiring a tsleep/wakeup sequence
                 * will take 7uS or more.
                 */
                if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
                        continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
                if (pipe_delay) {
                        int64_t tsc_target;
                        int good = 0;

                        tsc_target = tsc_get_target(pipe_delay);
                        while (tsc_test_target(tsc_target) == 0) {
                                if (rpipe->pipe_buffer.windex !=
                                    rpipe->pipe_buffer.rindex) {
                                        good = 1;
                                        break;
                                }
                        }
                        if (good)
                                continue;
                }
#endif

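                /*
                 * The spin window above is tunable at runtime via the
                 * kern.pipe.delay sysctl (in nanoseconds); setting it to 0
                 * disables the polling optimization entirely and falls
                 * straight through to the blocking path below.
                 */
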
                /*
                 * Detect EOF condition, do not set error.
                 */
                if (rpipe->pipe_state & PIPE_REOF)
                        break;

                /*
                 * Break if some data was read, or if this was a non-blocking
                 * read.
                 */
                if (nread > 0)
                        break;

                if (nbio) {
                        error = EAGAIN;
                        break;
                }

                /*
                 * Last chance, interlock with WANTR.
                 */
                lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
                size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
                if (size) {
                        lwkt_reltoken(&wlock);
                        continue;
                }

                /*
                 * Retest EOF - acquiring a new token can temporarily release
                 * tokens already held.
                 */
                if (rpipe->pipe_state & PIPE_REOF) {
                        lwkt_reltoken(&wlock);
                        break;
                }

                /*
                 * If there is no more to read in the pipe, reset its
                 * pointers to the beginning.  This improves cache hit
                 * stats.
                 *
                 * We need both locks to modify both pointers, and there
                 * must also not be a write in progress or the uiomove()
                 * in the write might block and temporarily release
                 * its wlock, then reacquire and update windex.  We are
                 * only serialized against reads, not writes.
                 *
                 * XXX should we even bother resetting the indices?  It
                 * might actually be more cache efficient not to.
                 */
                if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
                    rpipe->pipe_wip == 0) {
                        rpipe->pipe_buffer.rindex = 0;
                        rpipe->pipe_buffer.windex = 0;
                }

                /*
                 * Wait for more data.
                 *
                 * Pipe_state can only be set if both the rlock and wlock
                 * are held.
                 */
                rpipe->pipe_state |= PIPE_WANTR;
                tsleep_interlock(rpipe, PCATCH);
                lwkt_reltoken(&wlock);
                error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
                ++pipe_rblocked_count;
                if (error)
                        break;
        }
        pipe_end_uio(rpipe, &rpipe->pipe_rip);

        /*
         * Update last access time
         */
        if (error == 0 && nread)
                vfs_timestamp(&rpipe->pipe_atime);

        /*
         * If we drained the FIFO more than half way then handle
         * write blocking hysteresis.
         *
         * Note that PIPE_WANTW cannot be set by the writer without
         * it holding both rlock and wlock, so we can test it
         * while holding just rlock.
         */
        if (notify_writer) {
                if (rpipe->pipe_state & PIPE_WANTW) {
                        lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                lwkt_reltoken(&wlock);
                                wakeup(rpipe);
                        } else {
                                lwkt_reltoken(&wlock);
                        }
                }
                if (rpipe->pipe_state & PIPE_SEL) {
                        lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
                        pipeselwakeup(rpipe);
                        lwkt_reltoken(&wlock);
                }
        }
        /*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
        lwkt_reltoken(&rlock);

        pipe_rel_mplock(&mpsave);
        return (error);
}

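/*
 * Locking pattern used throughout this file: pipe_state flags may be
 * tested optimistically while holding only one token, but may only be
 * modified while holding both the rlock and the wlock.  Hence the
 * double-checked sequence that appears in both the read and write
 * paths:
 *
 *      if (pipe->pipe_state & PIPE_WANTW) {
 *              lwkt_gettoken(&wlock, &pipe->pipe_wlock);
 *              if (pipe->pipe_state & PIPE_WANTW) {
 *                      pipe->pipe_state &= ~PIPE_WANTW;
 *                      lwkt_reltoken(&wlock);
 *                      wakeup(pipe);
 *              } else {
 *                      lwkt_reltoken(&wlock);
 *              }
 *      }
 *
 * The second test is required because acquiring a token can block and
 * temporarily release tokens already held, so the flag may have been
 * cleared by the time the new token is owned.
 */
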
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        int error;
        int orig_resid;
        int nbio;
        struct pipe *wpipe, *rpipe;
        lwkt_tokref rlock;
        lwkt_tokref wlock;
        u_int windex;
        u_int space;
        u_int wcount;
        int mpsave;
        int bigwrite;
        int bigcount;

        pipe_get_mplock(&mpsave);

        /*
         * Writes go to the peer.  The peer will always exist.
         */
        rpipe = (struct pipe *) fp->f_data;
        wpipe = rpipe->pipe_peer;
        lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
        if (wpipe->pipe_state & PIPE_WEOF) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wlock);
                return (EPIPE);
        }

        /*
         * Degenerate case (EPIPE takes prec)
         */
        if (uio->uio_resid == 0) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wlock);
                return(0);
        }

        /*
         * Writes are serialized (start_uio must be called with wlock)
         */
        error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
        if (error) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wlock);
                return (error);
        }

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        /*
         * If it is advantageous to resize the pipe buffer, do so.
         * We are write-serialized so we can block safely.
         */
        if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
            (pipe_nbig < pipe_maxbig) &&
            wpipe->pipe_wantwcnt > 4 &&
            (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
                /*
                 * Recheck after lock.
                 */
                lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
                if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
                    (pipe_nbig < pipe_maxbig) &&
                    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
                        atomic_add_int(&pipe_nbig, 1);
                        if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
                                ++pipe_bigcount;
                        else
                                atomic_subtract_int(&pipe_nbig, 1);
                }
                lwkt_reltoken(&rlock);
        }

        orig_resid = uio->uio_resid;
        wcount = 0;

        bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
        bigcount = 10;

        while (uio->uio_resid) {
                if (wpipe->pipe_state & PIPE_WEOF) {
                        error = EPIPE;
                        break;
                }

                /*
                 * Don't hog the cpu.
                 */
                if (bigwrite && --bigcount == 0) {
                        lwkt_user_yield();
                        bigcount = 10;
                        if (CURSIG(curthread->td_lwp)) {
                                error = EINTR;
                                break;
                        }
                }

                windex = wpipe->pipe_buffer.windex &
                         (wpipe->pipe_buffer.size - 1);
                space = wpipe->pipe_buffer.size -
                        (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
                cpu_lfence();

                /* Writes of size <= PIPE_BUF must be atomic. */
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;

                /*
                 * Write to fill, read size handles write hysteresis.
                 * Additional restrictions can also cause select-based
                 * non-blocking writes to spin.
                 */
                if (space > 0) {
                        u_int segsize;

                        /*
                         * Transfer size is minimum of uio transfer
                         * and free space in pipe buffer.
                         *
                         * Limit each uiocopy to no more than PIPE_SIZE
                         * so we can keep the gravy train going on a
                         * SMP box.  This doubles the performance for
                         * write sizes > 16K.  Otherwise large writes
                         * wind up doing an inefficient synchronous
                         * ping-pong.
                         */
                        space = szmin(space, uio->uio_resid);
                        if (space > PIPE_SIZE)
                                space = PIPE_SIZE;

                        /*
                         * First segment to transfer is minimum of
                         * transfer size and contiguous space in
                         * pipe buffer.  If first segment to transfer
                         * is less than the transfer size, we've got
                         * a wraparound in the buffer.
                         */
                        segsize = wpipe->pipe_buffer.size - windex;
                        if (segsize > space)
                                segsize = space;

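                        /*
                         * Example: with size 16384 and windex masked to
                         * 16380, a 24 byte transfer is split below into a
                         * 4 byte uiomove() to the tail of the buffer and
                         * a 20 byte uiomove() restarting at buffer[0].
                         */
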
#ifdef SMP
                        /*
                         * If this is the first loop and the reader is
                         * blocked, do a preemptive wakeup of the reader.
                         *
                         * On SMP the IPI latency plus the wlock interlock
                         * on the reader side is the fastest way to get the
                         * reader going.  (The scheduler will hard loop on
                         * lock tokens).
                         *
                         * NOTE: We can't clear WANTR here without acquiring
                         * the rlock, which we don't want to do here!
                         */
                        if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
                                wakeup(wpipe);
#endif

                        /*
                         * Transfer segment, which may include a wrap-around.
                         * Update windex to account for both all in one go
                         * so the reader can read() the data atomically.
                         */
                        error = uiomove(&wpipe->pipe_buffer.buffer[windex],
                                        segsize, uio);
                        if (error == 0 && segsize < space) {
                                segsize = space - segsize;
                                error = uiomove(&wpipe->pipe_buffer.buffer[0],
                                                segsize, uio);
                        }
                        if (error)
                                break;
                        cpu_mfence();
                        wpipe->pipe_buffer.windex += space;
                        wcount += space;
                        continue;
                }

                /*
                 * We need both the rlock and the wlock to interlock against
                 * the EOF, WANTW, and size checks, and to modify pipe_state.
                 *
                 * These are token locks so we do not have to worry about
                 * deadlocks.
                 */
                lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

                /*
                 * If the "read-side" has been blocked, wake it up now
                 * and yield to let it drain synchronously rather
                 * than block.
                 */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }

                /*
                 * don't block on non-blocking I/O
                 */
                if (nbio) {
                        lwkt_reltoken(&rlock);
                        error = EAGAIN;
                        break;
                }

                /*
                 * re-test whether we have to block in the writer after
                 * acquiring both locks, in case the reader opened up
                 * some space.
                 */
                space = wpipe->pipe_buffer.size -
                        (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
                cpu_lfence();
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;

                /*
                 * Retest EOF - acquiring a new token can temporarily release
                 * tokens already held.
                 */
                if (wpipe->pipe_state & PIPE_WEOF) {
                        lwkt_reltoken(&rlock);
                        error = EPIPE;
                        break;
                }

                /*
                 * We have no more space and have something to offer,
                 * wake up select/poll.
                 */
                if (space == 0) {
                        wpipe->pipe_state |= PIPE_WANTW;
                        ++wpipe->pipe_wantwcnt;
                        pipeselwakeup(wpipe);
                        if (wpipe->pipe_state & PIPE_WANTW)
                                error = tsleep(wpipe, PCATCH, "pipewr", 0);
                        ++pipe_wblocked_count;
                }
                lwkt_reltoken(&rlock);

                /*
                 * Break out if we errored or the read side wants us to go
                 * away.
                 */
                if (error)
                        break;
                if (wpipe->pipe_state & PIPE_WEOF) {
                        error = EPIPE;
                        break;
                }
        }
        pipe_end_uio(wpipe, &wpipe->pipe_wip);

        /*
         * If we have put any characters in the buffer, we wake up
         * the reader.
         *
         * Both rlock and wlock are required to be able to modify pipe_state.
         */
        if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                lwkt_reltoken(&rlock);
                                wakeup(wpipe);
                        } else {
                                lwkt_reltoken(&rlock);
                        }
                }
                if (wpipe->pipe_state & PIPE_SEL) {
                        lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
                        pipeselwakeup(wpipe);
                        lwkt_reltoken(&rlock);
                }
        }

        /*
         * Don't return EPIPE if I/O was successful
         */
        if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
            (uio->uio_resid == 0) &&
            (error == EPIPE)) {
                error = 0;
        }

        if (error == 0)
                vfs_timestamp(&wpipe->pipe_mtime);

        /*
         * We have something to offer, wake up select/poll.
         */
        /*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
        lwkt_reltoken(&wlock);
        pipe_rel_mplock(&mpsave);
        return (error);
}

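/*
 * Example: a userland consumer can query the number of unread bytes
 * buffered in the pipe before reading, via the FIONREAD case below:
 *
 *      int n;
 *
 *      if (ioctl(fd, FIONREAD, &n) == 0 && n > 0)
 *              read(fd, buf, n);
 */
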
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
           struct ucred *cred, struct sysmsg *msg)
{
        struct pipe *mpipe;
        lwkt_tokref rlock;
        lwkt_tokref wlock;
        int error;
        int mpsave;

        pipe_get_mplock(&mpsave);
        mpipe = (struct pipe *)fp->f_data;

        lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
        lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

        switch (cmd) {
        case FIOASYNC:
                if (*(int *)data) {
                        mpipe->pipe_state |= PIPE_ASYNC;
                } else {
                        mpipe->pipe_state &= ~PIPE_ASYNC;
                }
                error = 0;
                break;
        case FIONREAD:
                *(int *)data = mpipe->pipe_buffer.windex -
                                mpipe->pipe_buffer.rindex;
                error = 0;
                break;
        case FIOSETOWN:
                get_mplock();
                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
                rel_mplock();
                break;
        case FIOGETOWN:
                *(int *)data = fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        case TIOCSPGRP:
                /* This is deprecated, FIOSETOWN should be used instead. */
                get_mplock();
                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
                rel_mplock();
                break;
        case TIOCGPGRP:
                /* This is deprecated, FIOGETOWN should be used instead. */
                *(int *)data = -fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(&rlock);
        lwkt_reltoken(&wlock);
        pipe_rel_mplock(&mpsave);

        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * poll for events (helper)
 */
static int
pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events)
{
        int revents = 0;
        u_int space;

        if (events & (POLLIN | POLLRDNORM)) {
                if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
                    (rpipe->pipe_state & PIPE_REOF)) {
                        revents |= events & (POLLIN | POLLRDNORM);
                }
        }

        if (events & (POLLOUT | POLLWRNORM)) {
                if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
                        revents |= events & (POLLOUT | POLLWRNORM);
                } else {
                        space = wpipe->pipe_buffer.windex -
                                wpipe->pipe_buffer.rindex;
                        space = wpipe->pipe_buffer.size - space;
                        if (space >= PIPE_BUF)
                                revents |= events & (POLLOUT | POLLWRNORM);
                }
        }

        if ((rpipe->pipe_state & PIPE_REOF) ||
            (wpipe == NULL) ||
            (wpipe->pipe_state & PIPE_WEOF)) {
                revents |= POLLHUP;
        }
        return (revents);
}

/*
 * Poll for events from file pointer.
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
        lwkt_tokref rpipe_rlock;
        lwkt_tokref rpipe_wlock;
        lwkt_tokref wpipe_rlock;
        lwkt_tokref wpipe_wlock;
        struct pipe *rpipe;
        struct pipe *wpipe;
        int revents = 0;
        int mpsave;

        pipe_get_mplock(&mpsave);
        rpipe = (struct pipe *)fp->f_data;
        wpipe = rpipe->pipe_peer;

        revents = pipe_poll_events(rpipe, wpipe, events);
        if (revents == 0) {
                if (events & (POLLIN | POLLRDNORM)) {
                        lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
                        lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
                }
                if (events & (POLLOUT | POLLWRNORM)) {
                        lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
                        lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);
                }
                revents = pipe_poll_events(rpipe, wpipe, events);
                if (revents == 0) {
                        if (events & (POLLIN | POLLRDNORM)) {
                                selrecord(curthread, &rpipe->pipe_sel);
                                rpipe->pipe_state |= PIPE_SEL;
                        }
                        if (events & (POLLOUT | POLLWRNORM)) {
                                selrecord(curthread, &wpipe->pipe_sel);
                                wpipe->pipe_state |= PIPE_SEL;
                        }
                }
                if (events & (POLLIN | POLLRDNORM)) {
                        lwkt_reltoken(&rpipe_rlock);
                        lwkt_reltoken(&rpipe_wlock);
                }
                if (events & (POLLOUT | POLLWRNORM)) {
                        lwkt_reltoken(&wpipe_rlock);
                        lwkt_reltoken(&wpipe_wlock);
                }
        }
        pipe_rel_mplock(&mpsave);
        return (revents);
}

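/*
 * Example: fstat(2) on a pipe reports the number of unread bytes in
 * st_size, so after write(fds[1], "abc", 3) with no intervening read,
 * fstat(fds[0], &sb) yields sb.st_size == 3.
 */
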
/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
        struct pipe *pipe;
        int mpsave;

        pipe_get_mplock(&mpsave);
        pipe = (struct pipe *)fp->f_data;

        bzero((caddr_t)ub, sizeof(*ub));
        ub->st_mode = S_IFIFO;
        ub->st_blksize = pipe->pipe_buffer.size;
        ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
        ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
        ub->st_atimespec = pipe->pipe_atime;
        ub->st_mtimespec = pipe->pipe_mtime;
        ub->st_ctimespec = pipe->pipe_ctime;
        /*
         * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
         * st_flags, st_gen.
         * XXX (st_dev, st_ino) should be unique.
         */
        pipe_rel_mplock(&mpsave);
        return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
        struct pipe *cpipe;

        get_mplock();
        cpipe = (struct pipe *)fp->f_data;
        fp->f_ops = &badfileops;
        fp->f_data = NULL;
        funsetown(cpipe->pipe_sigio);
        pipeclose(cpipe);
        rel_mplock();
        return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int error = EPIPE;
        lwkt_tokref rpipe_rlock;
        lwkt_tokref rpipe_wlock;
        lwkt_tokref wpipe_rlock;
        lwkt_tokref wpipe_wlock;
        int mpsave;

        pipe_get_mplock(&mpsave);
        rpipe = (struct pipe *)fp->f_data;
        wpipe = rpipe->pipe_peer;

        /*
         * We modify pipe_state on both pipes, which means we need
         * all four tokens!
         */
        lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
        lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
        lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
        lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

        switch(how) {
        case SHUT_RDWR:
        case SHUT_RD:
                rpipe->pipe_state |= PIPE_REOF;         /* my reads */
                rpipe->pipe_state |= PIPE_WEOF;         /* peer writes */
                if (rpipe->pipe_state & PIPE_WANTR) {
                        rpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(rpipe);
                }
                if (rpipe->pipe_state & PIPE_WANTW) {
                        rpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(rpipe);
                }
                error = 0;
                if (how == SHUT_RD)
                        break;
                /* fall through */
        case SHUT_WR:
                wpipe->pipe_state |= PIPE_REOF;         /* peer reads */
                wpipe->pipe_state |= PIPE_WEOF;         /* my writes */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                if (wpipe->pipe_state & PIPE_WANTW) {
                        wpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(wpipe);
                }
                error = 0;
                break;
        }
        pipeselwakeup(rpipe);
        pipeselwakeup(wpipe);

        lwkt_reltoken(&rpipe_rlock);
        lwkt_reltoken(&rpipe_wlock);
        lwkt_reltoken(&wpipe_rlock);
        lwkt_reltoken(&wpipe_wlock);

        pipe_rel_mplock(&mpsave);
        return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
        if (cpipe->pipe_buffer.buffer != NULL) {
                if (cpipe->pipe_buffer.size > PIPE_SIZE)
                        atomic_subtract_int(&pipe_nbig, 1);
                kmem_free(&kernel_map,
                        (vm_offset_t)cpipe->pipe_buffer.buffer,
                        cpipe->pipe_buffer.size);
                cpipe->pipe_buffer.buffer = NULL;
                cpipe->pipe_buffer.object = NULL;
        }
}

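/*
 * Fully-closed pipes are either recycled into the per-cpu pipe cache
 * (bounded by the kern.pipe.maxcache sysctl) or freed outright at the
 * tail of pipeclose() below.  Only default-sized (PIPE_SIZE) buffers
 * are cached, which keeps the cache footprint predictable.
 */
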
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
        globaldata_t gd;
        struct pipe *ppipe;
        lwkt_tokref cpipe_rlock;
        lwkt_tokref cpipe_wlock;
        lwkt_tokref ppipe_rlock;
        lwkt_tokref ppipe_wlock;

        if (cpipe == NULL)
                return;

        /*
         * The slock may not have been allocated yet (close during
         * initialization)
         *
         * We need both the read and write tokens to modify pipe_state.
         */
        if (cpipe->pipe_slock)
                lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
        lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
        lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

        /*
         * Set our state, wakeup anyone waiting in select, and
         * wakeup anyone blocked on our pipe.
         */
        cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
        pipeselwakeup(cpipe);
        if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
                cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
                wakeup(cpipe);
        }

        /*
         * Disconnect from peer.
         */
        if ((ppipe = cpipe->pipe_peer) != NULL) {
                lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
                lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
                ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
                pipeselwakeup(ppipe);
                if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
                        ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
                        wakeup(ppipe);
                }
                if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
                        get_mplock();
                        KNOTE(&ppipe->pipe_sel.si_note, 0);
                        rel_mplock();
                }
                lwkt_reltoken(&ppipe_rlock);
                lwkt_reltoken(&ppipe_wlock);
        }

        /*
         * If the peer is also closed we can free resources for both
         * sides, otherwise we leave our side intact to deal with any
         * races (since we only have the slock).
         */
        if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
                cpipe->pipe_peer = NULL;
                ppipe->pipe_peer = NULL;
                ppipe->pipe_slock = NULL;       /* we will free the slock */
                pipeclose(ppipe);
                ppipe = NULL;
        }

        lwkt_reltoken(&cpipe_rlock);
        lwkt_reltoken(&cpipe_wlock);
        if (cpipe->pipe_slock)
                lockmgr(cpipe->pipe_slock, LK_RELEASE);

        /*
         * If we disassociated from our peer we can free resources
         */
        if (ppipe == NULL) {
                gd = mycpu;
                if (cpipe->pipe_slock) {
                        kfree(cpipe->pipe_slock, M_PIPE);
                        cpipe->pipe_slock = NULL;
                }
                if (gd->gd_pipeqcount >= pipe_maxcache ||
                    cpipe->pipe_buffer.size != PIPE_SIZE) {
                        pipe_free_kmem(cpipe);
                        kfree(cpipe, M_PIPE);
                } else {
                        cpipe->pipe_state = 0;
                        cpipe->pipe_peer = gd->gd_pipeq;
                        gd->gd_pipeq = cpipe;
                        ++gd->gd_pipeqcount;
                }
        }
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
        struct pipe *cpipe;

        get_mplock();
        cpipe = (struct pipe *)kn->kn_fp->f_data;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &pipe_rfiltops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &pipe_wfiltops;
                cpipe = cpipe->pipe_peer;
                if (cpipe == NULL) {
                        /* other end of pipe has been closed */
                        rel_mplock();
                        return (EPIPE);
                }
                break;
        default:
                /* drop the mplock before bailing out */
                rel_mplock();
                return (1);
        }
        kn->kn_hook = (caddr_t)cpipe;

        SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
        rel_mplock();
        return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
        struct pipe *cpipe = (struct pipe *)kn->kn_hook;

        SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

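/*
 * For EVFILT_READ, kn_data is the number of unread bytes and the
 * event fires once any data (or EOF) is pending.  For EVFILT_WRITE,
 * kn_data is the remaining buffer space and the event only fires
 * once at least PIPE_BUF bytes can be written without blocking.
 */
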
/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
        struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

        kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

        /* XXX RACE */
        if (rpipe->pipe_state & PIPE_REOF) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
        struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
        struct pipe *wpipe = rpipe->pipe_peer;
        u_int32_t space;

        /* XXX RACE */
        if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
                kn->kn_data = 0;
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        space = wpipe->pipe_buffer.windex -
                wpipe->pipe_buffer.rindex;
        space = wpipe->pipe_buffer.size - space;
        kn->kn_data = space;
        return (kn->kn_data >= PIPE_BUF);
}