1 /* 2 * Copyright (c) 1996 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Absolutely no warranty of function or purpose is made by the author 15 * John S. Dyson. 16 * 4. Modifications may be freely made to this file if the above conditions 17 * are met. 18 * 19 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $ 20 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $ 21 */ 22 23 /* 24 * This file contains a high-performance replacement for the socket-based 25 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support 26 * all features of sockets, but does do everything that pipes normally 27 * do. 28 */ 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/kernel.h> 32 #include <sys/proc.h> 33 #include <sys/fcntl.h> 34 #include <sys/file.h> 35 #include <sys/filedesc.h> 36 #include <sys/filio.h> 37 #include <sys/ttycom.h> 38 #include <sys/stat.h> 39 #include <sys/poll.h> 40 #include <sys/select.h> 41 #include <sys/signalvar.h> 42 #include <sys/sysproto.h> 43 #include <sys/pipe.h> 44 #include <sys/vnode.h> 45 #include <sys/uio.h> 46 #include <sys/event.h> 47 #include <sys/globaldata.h> 48 #include <sys/module.h> 49 #include <sys/malloc.h> 50 #include <sys/sysctl.h> 51 #include <sys/socket.h> 52 53 #include <vm/vm.h> 54 #include <vm/vm_param.h> 55 #include <sys/lock.h> 56 #include <vm/vm_object.h> 57 #include <vm/vm_kern.h> 58 #include <vm/vm_extern.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_map.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_zone.h> 63 64 #include <sys/file2.h> 65 #include <sys/signal2.h> 66 #include <sys/mplock2.h> 67 68 #include <machine/cpufunc.h> 69 70 /* 71 * interfaces to the outside world 72 */ 73 static int pipe_read (struct file *fp, struct uio *uio, 74 struct ucred *cred, int flags); 75 static int pipe_write (struct file *fp, struct uio *uio, 76 struct ucred *cred, int flags); 77 static int pipe_close (struct file *fp); 78 static int pipe_shutdown (struct file *fp, int how); 79 static int pipe_poll (struct file *fp, int events, struct ucred *cred); 80 static int pipe_kqfilter (struct file *fp, struct knote *kn); 81 static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred); 82 static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, 83 struct ucred *cred, struct sysmsg *msg); 84 85 static struct fileops pipeops = { 86 .fo_read = pipe_read, 87 .fo_write = pipe_write, 88 .fo_ioctl = pipe_ioctl, 89 .fo_poll = pipe_poll, 90 .fo_kqfilter = pipe_kqfilter, 91 .fo_stat = pipe_stat, 92 .fo_close = pipe_close, 93 .fo_shutdown = pipe_shutdown 94 }; 95 96 static void filt_pipedetach(struct knote *kn); 97 static int filt_piperead(struct knote *kn, long hint); 98 static int filt_pipewrite(struct knote *kn, long hint); 99 100 static struct filterops pipe_rfiltops = 101 { 1, NULL, filt_pipedetach, filt_piperead }; 102 static struct filterops pipe_wfiltops = 103 { 1, NULL, filt_pipedetach, filt_pipewrite }; 104 105 MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures"); 106 107 /* 108 * Default pipe buffer size(s), this can be kind-of large now because pipe 109 * space is pageable. The pipe code will try to maintain locality of 110 * reference for performance reasons, so small amounts of outstanding I/O 111 * will not wipe the cache. 112 */ 113 #define MINPIPESIZE (PIPE_SIZE/3) 114 #define MAXPIPESIZE (2*PIPE_SIZE/3) 115 116 /* 117 * Limit the number of "big" pipes 118 */ 119 #define LIMITBIGPIPES 64 120 #define PIPEQ_MAX_CACHE 16 /* per-cpu pipe structure cache */ 121 122 static int pipe_maxbig = LIMITBIGPIPES; 123 static int pipe_maxcache = PIPEQ_MAX_CACHE; 124 static int pipe_bigcount; 125 static int pipe_nbig; 126 static int pipe_bcache_alloc; 127 static int pipe_bkmem_alloc; 128 static int pipe_rblocked_count; 129 static int pipe_wblocked_count; 130 131 SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation"); 132 SYSCTL_INT(_kern_pipe, OID_AUTO, nbig, 133 CTLFLAG_RD, &pipe_nbig, 0, "numer of big pipes allocated"); 134 SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount, 135 CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded"); 136 SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked, 137 CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe expanded"); 138 SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked, 139 CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe expanded"); 140 SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache, 141 CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu"); 142 SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig, 143 CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes"); 144 #ifdef SMP 145 static int pipe_delay = 5000; /* 5uS default */ 146 SYSCTL_INT(_kern_pipe, OID_AUTO, delay, 147 CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns"); 148 static int pipe_mpsafe = 1; 149 SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe, 150 CTLFLAG_RW, &pipe_mpsafe, 0, ""); 151 #endif 152 #if !defined(NO_PIPE_SYSCTL_STATS) 153 SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc, 154 CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache"); 155 SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc, 156 CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem"); 157 #endif 158 159 static void pipeclose (struct pipe *cpipe); 160 static void pipe_free_kmem (struct pipe *cpipe); 161 static int pipe_create (struct pipe **cpipep); 162 static __inline void pipeselwakeup (struct pipe *cpipe); 163 static int pipespace (struct pipe *cpipe, int size); 164 165 static __inline int 166 pipeseltest(struct pipe *cpipe) 167 { 168 return ((cpipe->pipe_state & PIPE_SEL) || 169 ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) || 170 SLIST_FIRST(&cpipe->pipe_sel.si_note)); 171 } 172 173 static __inline void 174 pipeselwakeup(struct pipe *cpipe) 175 { 176 if (cpipe->pipe_state & PIPE_SEL) { 177 get_mplock(); 178 cpipe->pipe_state &= ~PIPE_SEL; 179 selwakeup(&cpipe->pipe_sel); 180 rel_mplock(); 181 } 182 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) { 183 get_mplock(); 184 pgsigio(cpipe->pipe_sigio, SIGIO, 0); 185 rel_mplock(); 186 } 187 if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) { 188 get_mplock(); 189 KNOTE(&cpipe->pipe_sel.si_note, 0); 190 rel_mplock(); 191 } 192 } 193 194 /* 195 * These routines are called before and after a UIO. The UIO 196 * may block, causing our held tokens to be lost temporarily. 197 * 198 * We use these routines to serialize reads against other reads 199 * and writes against other writes. 200 * 201 * The read token is held on entry so *ipp does not race. 202 */ 203 static __inline int 204 pipe_start_uio(struct pipe *cpipe, int *ipp) 205 { 206 int error; 207 208 while (*ipp) { 209 *ipp = -1; 210 error = tsleep(ipp, PCATCH, "pipexx", 0); 211 if (error) 212 return (error); 213 } 214 *ipp = 1; 215 return (0); 216 } 217 218 static __inline void 219 pipe_end_uio(struct pipe *cpipe, int *ipp) 220 { 221 if (*ipp < 0) { 222 *ipp = 0; 223 wakeup(ipp); 224 } else { 225 KKASSERT(*ipp > 0); 226 *ipp = 0; 227 } 228 } 229 230 static __inline void 231 pipe_get_mplock(int *save) 232 { 233 #ifdef SMP 234 if (pipe_mpsafe == 0) { 235 get_mplock(); 236 *save = 1; 237 } else 238 #endif 239 { 240 *save = 0; 241 } 242 } 243 244 static __inline void 245 pipe_rel_mplock(int *save) 246 { 247 #ifdef SMP 248 if (*save) 249 rel_mplock(); 250 #endif 251 } 252 253 254 /* 255 * The pipe system call for the DTYPE_PIPE type of pipes 256 * 257 * pipe_args(int dummy) 258 * 259 * MPSAFE 260 */ 261 int 262 sys_pipe(struct pipe_args *uap) 263 { 264 struct thread *td = curthread; 265 struct filedesc *fdp = td->td_proc->p_fd; 266 struct file *rf, *wf; 267 struct pipe *rpipe, *wpipe; 268 int fd1, fd2, error; 269 270 rpipe = wpipe = NULL; 271 if (pipe_create(&rpipe) || pipe_create(&wpipe)) { 272 pipeclose(rpipe); 273 pipeclose(wpipe); 274 return (ENFILE); 275 } 276 277 error = falloc(td->td_lwp, &rf, &fd1); 278 if (error) { 279 pipeclose(rpipe); 280 pipeclose(wpipe); 281 return (error); 282 } 283 uap->sysmsg_fds[0] = fd1; 284 285 /* 286 * Warning: once we've gotten past allocation of the fd for the 287 * read-side, we can only drop the read side via fdrop() in order 288 * to avoid races against processes which manage to dup() the read 289 * side while we are blocked trying to allocate the write side. 290 */ 291 rf->f_type = DTYPE_PIPE; 292 rf->f_flag = FREAD | FWRITE; 293 rf->f_ops = &pipeops; 294 rf->f_data = rpipe; 295 error = falloc(td->td_lwp, &wf, &fd2); 296 if (error) { 297 fsetfd(fdp, NULL, fd1); 298 fdrop(rf); 299 /* rpipe has been closed by fdrop(). */ 300 pipeclose(wpipe); 301 return (error); 302 } 303 wf->f_type = DTYPE_PIPE; 304 wf->f_flag = FREAD | FWRITE; 305 wf->f_ops = &pipeops; 306 wf->f_data = wpipe; 307 uap->sysmsg_fds[1] = fd2; 308 309 rpipe->pipe_slock = kmalloc(sizeof(struct lock), 310 M_PIPE, M_WAITOK|M_ZERO); 311 wpipe->pipe_slock = rpipe->pipe_slock; 312 rpipe->pipe_peer = wpipe; 313 wpipe->pipe_peer = rpipe; 314 lockinit(rpipe->pipe_slock, "pipecl", 0, 0); 315 316 /* 317 * Once activated the peer relationship remains valid until 318 * both sides are closed. 319 */ 320 fsetfd(fdp, rf, fd1); 321 fsetfd(fdp, wf, fd2); 322 fdrop(rf); 323 fdrop(wf); 324 325 return (0); 326 } 327 328 /* 329 * Allocate kva for pipe circular buffer, the space is pageable 330 * This routine will 'realloc' the size of a pipe safely, if it fails 331 * it will retain the old buffer. 332 * If it fails it will return ENOMEM. 333 */ 334 static int 335 pipespace(struct pipe *cpipe, int size) 336 { 337 struct vm_object *object; 338 caddr_t buffer; 339 int npages, error; 340 341 npages = round_page(size) / PAGE_SIZE; 342 object = cpipe->pipe_buffer.object; 343 344 /* 345 * [re]create the object if necessary and reserve space for it 346 * in the kernel_map. The object and memory are pageable. On 347 * success, free the old resources before assigning the new 348 * ones. 349 */ 350 if (object == NULL || object->size != npages) { 351 get_mplock(); 352 object = vm_object_allocate(OBJT_DEFAULT, npages); 353 buffer = (caddr_t)vm_map_min(&kernel_map); 354 355 error = vm_map_find(&kernel_map, object, 0, 356 (vm_offset_t *)&buffer, 357 size, PAGE_SIZE, 358 1, VM_MAPTYPE_NORMAL, 359 VM_PROT_ALL, VM_PROT_ALL, 360 0); 361 362 if (error != KERN_SUCCESS) { 363 vm_object_deallocate(object); 364 rel_mplock(); 365 return (ENOMEM); 366 } 367 pipe_free_kmem(cpipe); 368 rel_mplock(); 369 cpipe->pipe_buffer.object = object; 370 cpipe->pipe_buffer.buffer = buffer; 371 cpipe->pipe_buffer.size = size; 372 ++pipe_bkmem_alloc; 373 } else { 374 ++pipe_bcache_alloc; 375 } 376 cpipe->pipe_buffer.rindex = 0; 377 cpipe->pipe_buffer.windex = 0; 378 return (0); 379 } 380 381 /* 382 * Initialize and allocate VM and memory for pipe, pulling the pipe from 383 * our per-cpu cache if possible. For now make sure it is sized for the 384 * smaller PIPE_SIZE default. 385 */ 386 static int 387 pipe_create(struct pipe **cpipep) 388 { 389 globaldata_t gd = mycpu; 390 struct pipe *cpipe; 391 int error; 392 393 if ((cpipe = gd->gd_pipeq) != NULL) { 394 gd->gd_pipeq = cpipe->pipe_peer; 395 --gd->gd_pipeqcount; 396 cpipe->pipe_peer = NULL; 397 cpipe->pipe_wantwcnt = 0; 398 } else { 399 cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO); 400 } 401 *cpipep = cpipe; 402 if ((error = pipespace(cpipe, PIPE_SIZE)) != 0) 403 return (error); 404 vfs_timestamp(&cpipe->pipe_ctime); 405 cpipe->pipe_atime = cpipe->pipe_ctime; 406 cpipe->pipe_mtime = cpipe->pipe_ctime; 407 lwkt_token_init(&cpipe->pipe_rlock); 408 lwkt_token_init(&cpipe->pipe_wlock); 409 return (0); 410 } 411 412 /* 413 * MPALMOSTSAFE (acquires mplock) 414 */ 415 static int 416 pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) 417 { 418 struct pipe *rpipe; 419 int error; 420 size_t nread = 0; 421 int nbio; 422 u_int size; /* total bytes available */ 423 u_int nsize; /* total bytes to read */ 424 u_int rindex; /* contiguous bytes available */ 425 int notify_writer; 426 lwkt_tokref rlock; 427 lwkt_tokref wlock; 428 int mpsave; 429 int bigread; 430 int bigcount; 431 432 if (uio->uio_resid == 0) 433 return(0); 434 435 /* 436 * Setup locks, calculate nbio 437 */ 438 pipe_get_mplock(&mpsave); 439 rpipe = (struct pipe *)fp->f_data; 440 lwkt_gettoken(&rlock, &rpipe->pipe_rlock); 441 442 if (fflags & O_FBLOCKING) 443 nbio = 0; 444 else if (fflags & O_FNONBLOCKING) 445 nbio = 1; 446 else if (fp->f_flag & O_NONBLOCK) 447 nbio = 1; 448 else 449 nbio = 0; 450 451 /* 452 * Reads are serialized. Note howeverthat pipe_buffer.buffer and 453 * pipe_buffer.size can change out from under us when the number 454 * of bytes in the buffer are zero due to the write-side doing a 455 * pipespace(). 456 */ 457 error = pipe_start_uio(rpipe, &rpipe->pipe_rip); 458 if (error) { 459 pipe_rel_mplock(&mpsave); 460 lwkt_reltoken(&rlock); 461 return (error); 462 } 463 notify_writer = 0; 464 465 bigread = (uio->uio_resid > 10 * 1024 * 1024); 466 bigcount = 10; 467 468 while (uio->uio_resid) { 469 /* 470 * Don't hog the cpu. 471 */ 472 if (bigread && --bigcount == 0) { 473 lwkt_user_yield(); 474 bigcount = 10; 475 if (CURSIG(curthread->td_lwp)) { 476 error = EINTR; 477 break; 478 } 479 } 480 481 size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex; 482 cpu_lfence(); 483 if (size) { 484 rindex = rpipe->pipe_buffer.rindex & 485 (rpipe->pipe_buffer.size - 1); 486 nsize = size; 487 if (nsize > rpipe->pipe_buffer.size - rindex) 488 nsize = rpipe->pipe_buffer.size - rindex; 489 nsize = szmin(nsize, uio->uio_resid); 490 491 error = uiomove(&rpipe->pipe_buffer.buffer[rindex], 492 nsize, uio); 493 if (error) 494 break; 495 cpu_mfence(); 496 rpipe->pipe_buffer.rindex += nsize; 497 nread += nsize; 498 499 /* 500 * If the FIFO is still over half full just continue 501 * and do not try to notify the writer yet. 502 */ 503 if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) { 504 notify_writer = 0; 505 continue; 506 } 507 508 /* 509 * When the FIFO is less then half full notify any 510 * waiting writer. WANTW can be checked while 511 * holding just the rlock. 512 */ 513 notify_writer = 1; 514 if ((rpipe->pipe_state & PIPE_WANTW) == 0) 515 continue; 516 } 517 518 /* 519 * If the "write-side" was blocked we wake it up. This code 520 * is reached either when the buffer is completely emptied 521 * or if it becomes more then half-empty. 522 * 523 * Pipe_state can only be modified if both the rlock and 524 * wlock are held. 525 */ 526 if (rpipe->pipe_state & PIPE_WANTW) { 527 lwkt_gettoken(&wlock, &rpipe->pipe_wlock); 528 if (rpipe->pipe_state & PIPE_WANTW) { 529 notify_writer = 0; 530 rpipe->pipe_state &= ~PIPE_WANTW; 531 lwkt_reltoken(&wlock); 532 wakeup(rpipe); 533 } else { 534 lwkt_reltoken(&wlock); 535 } 536 } 537 538 /* 539 * Pick up our copy loop again if the writer sent data to 540 * us while we were messing around. 541 * 542 * On a SMP box poll up to pipe_delay nanoseconds for new 543 * data. Typically a value of 2000 to 4000 is sufficient 544 * to eradicate most IPIs/tsleeps/wakeups when a pipe 545 * is used for synchronous communications with small packets, 546 * and 8000 or so (8uS) will pipeline large buffer xfers 547 * between cpus over a pipe. 548 * 549 * For synchronous communications a hit means doing a 550 * full Awrite-Bread-Bwrite-Aread cycle in less then 2uS, 551 * where as miss requiring a tsleep/wakeup sequence 552 * will take 7uS or more. 553 */ 554 if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) 555 continue; 556 557 #if defined(SMP) && defined(_RDTSC_SUPPORTED_) 558 if (pipe_delay) { 559 int64_t tsc_target; 560 int good = 0; 561 562 tsc_target = tsc_get_target(pipe_delay); 563 while (tsc_test_target(tsc_target) == 0) { 564 if (rpipe->pipe_buffer.windex != 565 rpipe->pipe_buffer.rindex) { 566 good = 1; 567 break; 568 } 569 } 570 if (good) 571 continue; 572 } 573 #endif 574 575 /* 576 * Detect EOF condition, do not set error. 577 */ 578 if (rpipe->pipe_state & PIPE_REOF) 579 break; 580 581 /* 582 * Break if some data was read, or if this was a non-blocking 583 * read. 584 */ 585 if (nread > 0) 586 break; 587 588 if (nbio) { 589 error = EAGAIN; 590 break; 591 } 592 593 /* 594 * Last chance, interlock with WANTR. 595 */ 596 lwkt_gettoken(&wlock, &rpipe->pipe_wlock); 597 size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex; 598 if (size) { 599 lwkt_reltoken(&wlock); 600 continue; 601 } 602 603 /* 604 * Retest EOF - acquiring a new token can temporarily release 605 * tokens already held. 606 */ 607 if (rpipe->pipe_state & PIPE_REOF) { 608 lwkt_reltoken(&wlock); 609 break; 610 } 611 612 /* 613 * If there is no more to read in the pipe, reset its 614 * pointers to the beginning. This improves cache hit 615 * stats. 616 * 617 * We need both locks to modify both pointers, and there 618 * must also not be a write in progress or the uiomove() 619 * in the write might block and temporarily release 620 * its wlock, then reacquire and update windex. We are 621 * only serialized against reads, not writes. 622 * 623 * XXX should we even bother resetting the indices? It 624 * might actually be more cache efficient not to. 625 */ 626 if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex && 627 rpipe->pipe_wip == 0) { 628 rpipe->pipe_buffer.rindex = 0; 629 rpipe->pipe_buffer.windex = 0; 630 } 631 632 /* 633 * Wait for more data. 634 * 635 * Pipe_state can only be set if both the rlock and wlock 636 * are held. 637 */ 638 rpipe->pipe_state |= PIPE_WANTR; 639 tsleep_interlock(rpipe, PCATCH); 640 lwkt_reltoken(&wlock); 641 error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0); 642 ++pipe_rblocked_count; 643 if (error) 644 break; 645 } 646 pipe_end_uio(rpipe, &rpipe->pipe_rip); 647 648 /* 649 * Uptime last access time 650 */ 651 if (error == 0 && nread) 652 vfs_timestamp(&rpipe->pipe_atime); 653 654 /* 655 * If we drained the FIFO more then half way then handle 656 * write blocking hysteresis. 657 * 658 * Note that PIPE_WANTW cannot be set by the writer without 659 * it holding both rlock and wlock, so we can test it 660 * while holding just rlock. 661 */ 662 if (notify_writer) { 663 if (rpipe->pipe_state & PIPE_WANTW) { 664 lwkt_gettoken(&wlock, &rpipe->pipe_wlock); 665 if (rpipe->pipe_state & PIPE_WANTW) { 666 rpipe->pipe_state &= ~PIPE_WANTW; 667 lwkt_reltoken(&wlock); 668 wakeup(rpipe); 669 } else { 670 lwkt_reltoken(&wlock); 671 } 672 } 673 if (pipeseltest(rpipe)) { 674 lwkt_gettoken(&wlock, &rpipe->pipe_wlock); 675 pipeselwakeup(rpipe); 676 lwkt_reltoken(&wlock); 677 } 678 } 679 /*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/ 680 lwkt_reltoken(&rlock); 681 682 pipe_rel_mplock(&mpsave); 683 return (error); 684 } 685 686 /* 687 * MPALMOSTSAFE - acquires mplock 688 */ 689 static int 690 pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) 691 { 692 int error; 693 int orig_resid; 694 int nbio; 695 struct pipe *wpipe, *rpipe; 696 lwkt_tokref rlock; 697 lwkt_tokref wlock; 698 u_int windex; 699 u_int space; 700 u_int wcount; 701 int mpsave; 702 int bigwrite; 703 int bigcount; 704 705 pipe_get_mplock(&mpsave); 706 707 /* 708 * Writes go to the peer. The peer will always exist. 709 */ 710 rpipe = (struct pipe *) fp->f_data; 711 wpipe = rpipe->pipe_peer; 712 lwkt_gettoken(&wlock, &wpipe->pipe_wlock); 713 if (wpipe->pipe_state & PIPE_WEOF) { 714 pipe_rel_mplock(&mpsave); 715 lwkt_reltoken(&wlock); 716 return (EPIPE); 717 } 718 719 /* 720 * Degenerate case (EPIPE takes prec) 721 */ 722 if (uio->uio_resid == 0) { 723 pipe_rel_mplock(&mpsave); 724 lwkt_reltoken(&wlock); 725 return(0); 726 } 727 728 /* 729 * Writes are serialized (start_uio must be called with wlock) 730 */ 731 error = pipe_start_uio(wpipe, &wpipe->pipe_wip); 732 if (error) { 733 pipe_rel_mplock(&mpsave); 734 lwkt_reltoken(&wlock); 735 return (error); 736 } 737 738 if (fflags & O_FBLOCKING) 739 nbio = 0; 740 else if (fflags & O_FNONBLOCKING) 741 nbio = 1; 742 else if (fp->f_flag & O_NONBLOCK) 743 nbio = 1; 744 else 745 nbio = 0; 746 747 /* 748 * If it is advantageous to resize the pipe buffer, do 749 * so. We are write-serialized so we can block safely. 750 */ 751 if ((wpipe->pipe_buffer.size <= PIPE_SIZE) && 752 (pipe_nbig < pipe_maxbig) && 753 wpipe->pipe_wantwcnt > 4 && 754 (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) { 755 /* 756 * Recheck after lock. 757 */ 758 lwkt_gettoken(&rlock, &wpipe->pipe_rlock); 759 if ((wpipe->pipe_buffer.size <= PIPE_SIZE) && 760 (pipe_nbig < pipe_maxbig) && 761 (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) { 762 atomic_add_int(&pipe_nbig, 1); 763 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0) 764 ++pipe_bigcount; 765 else 766 atomic_subtract_int(&pipe_nbig, 1); 767 } 768 lwkt_reltoken(&rlock); 769 } 770 771 orig_resid = uio->uio_resid; 772 wcount = 0; 773 774 bigwrite = (uio->uio_resid > 10 * 1024 * 1024); 775 bigcount = 10; 776 777 while (uio->uio_resid) { 778 if (wpipe->pipe_state & PIPE_WEOF) { 779 error = EPIPE; 780 break; 781 } 782 783 /* 784 * Don't hog the cpu. 785 */ 786 if (bigwrite && --bigcount == 0) { 787 lwkt_user_yield(); 788 bigcount = 10; 789 if (CURSIG(curthread->td_lwp)) { 790 error = EINTR; 791 break; 792 } 793 } 794 795 windex = wpipe->pipe_buffer.windex & 796 (wpipe->pipe_buffer.size - 1); 797 space = wpipe->pipe_buffer.size - 798 (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex); 799 cpu_lfence(); 800 801 /* Writes of size <= PIPE_BUF must be atomic. */ 802 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF)) 803 space = 0; 804 805 /* 806 * Write to fill, read size handles write hysteresis. Also 807 * additional restrictions can cause select-based non-blocking 808 * writes to spin. 809 */ 810 if (space > 0) { 811 u_int segsize; 812 813 /* 814 * Transfer size is minimum of uio transfer 815 * and free space in pipe buffer. 816 * 817 * Limit each uiocopy to no more then PIPE_SIZE 818 * so we can keep the gravy train going on a 819 * SMP box. This doubles the performance for 820 * write sizes > 16K. Otherwise large writes 821 * wind up doing an inefficient synchronous 822 * ping-pong. 823 */ 824 space = szmin(space, uio->uio_resid); 825 if (space > PIPE_SIZE) 826 space = PIPE_SIZE; 827 828 /* 829 * First segment to transfer is minimum of 830 * transfer size and contiguous space in 831 * pipe buffer. If first segment to transfer 832 * is less than the transfer size, we've got 833 * a wraparound in the buffer. 834 */ 835 segsize = wpipe->pipe_buffer.size - windex; 836 if (segsize > space) 837 segsize = space; 838 839 #ifdef SMP 840 /* 841 * If this is the first loop and the reader is 842 * blocked, do a preemptive wakeup of the reader. 843 * 844 * On SMP the IPI latency plus the wlock interlock 845 * on the reader side is the fastest way to get the 846 * reader going. (The scheduler will hard loop on 847 * lock tokens). 848 * 849 * NOTE: We can't clear WANTR here without acquiring 850 * the rlock, which we don't want to do here! 851 */ 852 if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1) 853 wakeup(wpipe); 854 #endif 855 856 /* 857 * Transfer segment, which may include a wrap-around. 858 * Update windex to account for both all in one go 859 * so the reader can read() the data atomically. 860 */ 861 error = uiomove(&wpipe->pipe_buffer.buffer[windex], 862 segsize, uio); 863 if (error == 0 && segsize < space) { 864 segsize = space - segsize; 865 error = uiomove(&wpipe->pipe_buffer.buffer[0], 866 segsize, uio); 867 } 868 if (error) 869 break; 870 cpu_mfence(); 871 wpipe->pipe_buffer.windex += space; 872 wcount += space; 873 continue; 874 } 875 876 /* 877 * We need both the rlock and the wlock to interlock against 878 * the EOF, WANTW, and size checks, and to modify pipe_state. 879 * 880 * These are token locks so we do not have to worry about 881 * deadlocks. 882 */ 883 lwkt_gettoken(&rlock, &wpipe->pipe_rlock); 884 885 /* 886 * If the "read-side" has been blocked, wake it up now 887 * and yield to let it drain synchronously rather 888 * then block. 889 */ 890 if (wpipe->pipe_state & PIPE_WANTR) { 891 wpipe->pipe_state &= ~PIPE_WANTR; 892 wakeup(wpipe); 893 } 894 895 /* 896 * don't block on non-blocking I/O 897 */ 898 if (nbio) { 899 lwkt_reltoken(&rlock); 900 error = EAGAIN; 901 break; 902 } 903 904 /* 905 * re-test whether we have to block in the writer after 906 * acquiring both locks, in case the reader opened up 907 * some space. 908 */ 909 space = wpipe->pipe_buffer.size - 910 (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex); 911 cpu_lfence(); 912 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF)) 913 space = 0; 914 915 /* 916 * Retest EOF - acquiring a new token can temporarily release 917 * tokens already held. 918 */ 919 if (wpipe->pipe_state & PIPE_WEOF) { 920 lwkt_reltoken(&rlock); 921 error = EPIPE; 922 break; 923 } 924 925 /* 926 * We have no more space and have something to offer, 927 * wake up select/poll. 928 */ 929 if (space == 0) { 930 wpipe->pipe_state |= PIPE_WANTW; 931 ++wpipe->pipe_wantwcnt; 932 pipeselwakeup(wpipe); 933 if (wpipe->pipe_state & PIPE_WANTW) 934 error = tsleep(wpipe, PCATCH, "pipewr", 0); 935 ++pipe_wblocked_count; 936 } 937 lwkt_reltoken(&rlock); 938 939 /* 940 * Break out if we errored or the read side wants us to go 941 * away. 942 */ 943 if (error) 944 break; 945 if (wpipe->pipe_state & PIPE_WEOF) { 946 error = EPIPE; 947 break; 948 } 949 } 950 pipe_end_uio(wpipe, &wpipe->pipe_wip); 951 952 /* 953 * If we have put any characters in the buffer, we wake up 954 * the reader. 955 * 956 * Both rlock and wlock are required to be able to modify pipe_state. 957 */ 958 if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) { 959 if (wpipe->pipe_state & PIPE_WANTR) { 960 lwkt_gettoken(&rlock, &wpipe->pipe_rlock); 961 if (wpipe->pipe_state & PIPE_WANTR) { 962 wpipe->pipe_state &= ~PIPE_WANTR; 963 lwkt_reltoken(&rlock); 964 wakeup(wpipe); 965 } else { 966 lwkt_reltoken(&rlock); 967 } 968 } 969 if (pipeseltest(wpipe)) { 970 lwkt_gettoken(&rlock, &wpipe->pipe_rlock); 971 pipeselwakeup(wpipe); 972 lwkt_reltoken(&rlock); 973 } 974 } 975 976 /* 977 * Don't return EPIPE if I/O was successful 978 */ 979 if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) && 980 (uio->uio_resid == 0) && 981 (error == EPIPE)) { 982 error = 0; 983 } 984 985 if (error == 0) 986 vfs_timestamp(&wpipe->pipe_mtime); 987 988 /* 989 * We have something to offer, 990 * wake up select/poll. 991 */ 992 /*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/ 993 lwkt_reltoken(&wlock); 994 pipe_rel_mplock(&mpsave); 995 return (error); 996 } 997 998 /* 999 * MPALMOSTSAFE - acquires mplock 1000 * 1001 * we implement a very minimal set of ioctls for compatibility with sockets. 1002 */ 1003 int 1004 pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, 1005 struct ucred *cred, struct sysmsg *msg) 1006 { 1007 struct pipe *mpipe; 1008 lwkt_tokref rlock; 1009 lwkt_tokref wlock; 1010 int error; 1011 int mpsave; 1012 1013 pipe_get_mplock(&mpsave); 1014 mpipe = (struct pipe *)fp->f_data; 1015 1016 lwkt_gettoken(&rlock, &mpipe->pipe_rlock); 1017 lwkt_gettoken(&wlock, &mpipe->pipe_wlock); 1018 1019 switch (cmd) { 1020 case FIOASYNC: 1021 if (*(int *)data) { 1022 mpipe->pipe_state |= PIPE_ASYNC; 1023 } else { 1024 mpipe->pipe_state &= ~PIPE_ASYNC; 1025 } 1026 error = 0; 1027 break; 1028 case FIONREAD: 1029 *(int *)data = mpipe->pipe_buffer.windex - 1030 mpipe->pipe_buffer.rindex; 1031 error = 0; 1032 break; 1033 case FIOSETOWN: 1034 get_mplock(); 1035 error = fsetown(*(int *)data, &mpipe->pipe_sigio); 1036 rel_mplock(); 1037 break; 1038 case FIOGETOWN: 1039 *(int *)data = fgetown(mpipe->pipe_sigio); 1040 error = 0; 1041 break; 1042 case TIOCSPGRP: 1043 /* This is deprecated, FIOSETOWN should be used instead. */ 1044 get_mplock(); 1045 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio); 1046 rel_mplock(); 1047 break; 1048 1049 case TIOCGPGRP: 1050 /* This is deprecated, FIOGETOWN should be used instead. */ 1051 *(int *)data = -fgetown(mpipe->pipe_sigio); 1052 error = 0; 1053 break; 1054 default: 1055 error = ENOTTY; 1056 break; 1057 } 1058 lwkt_reltoken(&rlock); 1059 lwkt_reltoken(&wlock); 1060 pipe_rel_mplock(&mpsave); 1061 1062 return (error); 1063 } 1064 1065 /* 1066 * MPALMOSTSAFE - acquires mplock 1067 * 1068 * poll for events (helper) 1069 */ 1070 static int 1071 pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events) 1072 { 1073 int revents = 0; 1074 u_int space; 1075 1076 if (events & (POLLIN | POLLRDNORM)) { 1077 if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) || 1078 (rpipe->pipe_state & PIPE_REOF)) { 1079 revents |= events & (POLLIN | POLLRDNORM); 1080 } 1081 } 1082 1083 if (events & (POLLOUT | POLLWRNORM)) { 1084 if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) { 1085 revents |= events & (POLLOUT | POLLWRNORM); 1086 } else { 1087 space = wpipe->pipe_buffer.windex - 1088 wpipe->pipe_buffer.rindex; 1089 space = wpipe->pipe_buffer.size - space; 1090 if (space >= PIPE_BUF) 1091 revents |= events & (POLLOUT | POLLWRNORM); 1092 } 1093 } 1094 1095 if ((rpipe->pipe_state & PIPE_REOF) || 1096 (wpipe == NULL) || 1097 (wpipe->pipe_state & PIPE_WEOF)) { 1098 revents |= POLLHUP; 1099 } 1100 return (revents); 1101 } 1102 1103 /* 1104 * Poll for events from file pointer. 1105 */ 1106 int 1107 pipe_poll(struct file *fp, int events, struct ucred *cred) 1108 { 1109 lwkt_tokref rpipe_rlock; 1110 lwkt_tokref rpipe_wlock; 1111 lwkt_tokref wpipe_rlock; 1112 lwkt_tokref wpipe_wlock; 1113 struct pipe *rpipe; 1114 struct pipe *wpipe; 1115 int revents = 0; 1116 int mpsave; 1117 1118 pipe_get_mplock(&mpsave); 1119 rpipe = (struct pipe *)fp->f_data; 1120 wpipe = rpipe->pipe_peer; 1121 1122 revents = pipe_poll_events(rpipe, wpipe, events); 1123 if (revents == 0) { 1124 if (events & (POLLIN | POLLRDNORM)) { 1125 lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock); 1126 lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock); 1127 } 1128 if (events & (POLLOUT | POLLWRNORM)) { 1129 lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock); 1130 lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock); 1131 } 1132 revents = pipe_poll_events(rpipe, wpipe, events); 1133 if (revents == 0) { 1134 if (events & (POLLIN | POLLRDNORM)) { 1135 selrecord(curthread, &rpipe->pipe_sel); 1136 rpipe->pipe_state |= PIPE_SEL; 1137 } 1138 1139 if (events & (POLLOUT | POLLWRNORM)) { 1140 selrecord(curthread, &wpipe->pipe_sel); 1141 wpipe->pipe_state |= PIPE_SEL; 1142 } 1143 } 1144 if (events & (POLLIN | POLLRDNORM)) { 1145 lwkt_reltoken(&rpipe_rlock); 1146 lwkt_reltoken(&rpipe_wlock); 1147 } 1148 if (events & (POLLOUT | POLLWRNORM)) { 1149 lwkt_reltoken(&wpipe_rlock); 1150 lwkt_reltoken(&wpipe_wlock); 1151 } 1152 } 1153 pipe_rel_mplock(&mpsave); 1154 return (revents); 1155 } 1156 1157 /* 1158 * MPSAFE 1159 */ 1160 static int 1161 pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred) 1162 { 1163 struct pipe *pipe; 1164 int mpsave; 1165 1166 pipe_get_mplock(&mpsave); 1167 pipe = (struct pipe *)fp->f_data; 1168 1169 bzero((caddr_t)ub, sizeof(*ub)); 1170 ub->st_mode = S_IFIFO; 1171 ub->st_blksize = pipe->pipe_buffer.size; 1172 ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex; 1173 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize; 1174 ub->st_atimespec = pipe->pipe_atime; 1175 ub->st_mtimespec = pipe->pipe_mtime; 1176 ub->st_ctimespec = pipe->pipe_ctime; 1177 /* 1178 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev, 1179 * st_flags, st_gen. 1180 * XXX (st_dev, st_ino) should be unique. 1181 */ 1182 pipe_rel_mplock(&mpsave); 1183 return (0); 1184 } 1185 1186 /* 1187 * MPALMOSTSAFE - acquires mplock 1188 */ 1189 static int 1190 pipe_close(struct file *fp) 1191 { 1192 struct pipe *cpipe; 1193 1194 get_mplock(); 1195 cpipe = (struct pipe *)fp->f_data; 1196 fp->f_ops = &badfileops; 1197 fp->f_data = NULL; 1198 funsetown(cpipe->pipe_sigio); 1199 pipeclose(cpipe); 1200 rel_mplock(); 1201 return (0); 1202 } 1203 1204 /* 1205 * Shutdown one or both directions of a full-duplex pipe. 1206 * 1207 * MPALMOSTSAFE - acquires mplock 1208 */ 1209 static int 1210 pipe_shutdown(struct file *fp, int how) 1211 { 1212 struct pipe *rpipe; 1213 struct pipe *wpipe; 1214 int error = EPIPE; 1215 lwkt_tokref rpipe_rlock; 1216 lwkt_tokref rpipe_wlock; 1217 lwkt_tokref wpipe_rlock; 1218 lwkt_tokref wpipe_wlock; 1219 int mpsave; 1220 1221 pipe_get_mplock(&mpsave); 1222 rpipe = (struct pipe *)fp->f_data; 1223 wpipe = rpipe->pipe_peer; 1224 1225 /* 1226 * We modify pipe_state on both pipes, which means we need 1227 * all four tokens! 1228 */ 1229 lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock); 1230 lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock); 1231 lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock); 1232 lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock); 1233 1234 switch(how) { 1235 case SHUT_RDWR: 1236 case SHUT_RD: 1237 rpipe->pipe_state |= PIPE_REOF; /* my reads */ 1238 rpipe->pipe_state |= PIPE_WEOF; /* peer writes */ 1239 if (rpipe->pipe_state & PIPE_WANTR) { 1240 rpipe->pipe_state &= ~PIPE_WANTR; 1241 wakeup(rpipe); 1242 } 1243 if (rpipe->pipe_state & PIPE_WANTW) { 1244 rpipe->pipe_state &= ~PIPE_WANTW; 1245 wakeup(rpipe); 1246 } 1247 error = 0; 1248 if (how == SHUT_RD) 1249 break; 1250 /* fall through */ 1251 case SHUT_WR: 1252 wpipe->pipe_state |= PIPE_REOF; /* peer reads */ 1253 wpipe->pipe_state |= PIPE_WEOF; /* my writes */ 1254 if (wpipe->pipe_state & PIPE_WANTR) { 1255 wpipe->pipe_state &= ~PIPE_WANTR; 1256 wakeup(wpipe); 1257 } 1258 if (wpipe->pipe_state & PIPE_WANTW) { 1259 wpipe->pipe_state &= ~PIPE_WANTW; 1260 wakeup(wpipe); 1261 } 1262 error = 0; 1263 break; 1264 } 1265 pipeselwakeup(rpipe); 1266 pipeselwakeup(wpipe); 1267 1268 lwkt_reltoken(&rpipe_rlock); 1269 lwkt_reltoken(&rpipe_wlock); 1270 lwkt_reltoken(&wpipe_rlock); 1271 lwkt_reltoken(&wpipe_wlock); 1272 1273 pipe_rel_mplock(&mpsave); 1274 return (error); 1275 } 1276 1277 static void 1278 pipe_free_kmem(struct pipe *cpipe) 1279 { 1280 if (cpipe->pipe_buffer.buffer != NULL) { 1281 if (cpipe->pipe_buffer.size > PIPE_SIZE) 1282 atomic_subtract_int(&pipe_nbig, 1); 1283 kmem_free(&kernel_map, 1284 (vm_offset_t)cpipe->pipe_buffer.buffer, 1285 cpipe->pipe_buffer.size); 1286 cpipe->pipe_buffer.buffer = NULL; 1287 cpipe->pipe_buffer.object = NULL; 1288 } 1289 } 1290 1291 /* 1292 * Close the pipe. The slock must be held to interlock against simultanious 1293 * closes. The rlock and wlock must be held to adjust the pipe_state. 1294 */ 1295 static void 1296 pipeclose(struct pipe *cpipe) 1297 { 1298 globaldata_t gd; 1299 struct pipe *ppipe; 1300 lwkt_tokref cpipe_rlock; 1301 lwkt_tokref cpipe_wlock; 1302 lwkt_tokref ppipe_rlock; 1303 lwkt_tokref ppipe_wlock; 1304 1305 if (cpipe == NULL) 1306 return; 1307 1308 /* 1309 * The slock may not have been allocated yet (close during 1310 * initialization) 1311 * 1312 * We need both the read and write tokens to modify pipe_state. 1313 */ 1314 if (cpipe->pipe_slock) 1315 lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE); 1316 lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock); 1317 lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock); 1318 1319 /* 1320 * Set our state, wakeup anyone waiting in select, and 1321 * wakeup anyone blocked on our pipe. 1322 */ 1323 cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF; 1324 pipeselwakeup(cpipe); 1325 if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) { 1326 cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW); 1327 wakeup(cpipe); 1328 } 1329 1330 /* 1331 * Disconnect from peer. 1332 */ 1333 if ((ppipe = cpipe->pipe_peer) != NULL) { 1334 lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock); 1335 lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock); 1336 ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF; 1337 pipeselwakeup(ppipe); 1338 if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) { 1339 ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW); 1340 wakeup(ppipe); 1341 } 1342 if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) { 1343 get_mplock(); 1344 KNOTE(&ppipe->pipe_sel.si_note, 0); 1345 rel_mplock(); 1346 } 1347 lwkt_reltoken(&ppipe_rlock); 1348 lwkt_reltoken(&ppipe_wlock); 1349 } 1350 1351 /* 1352 * If the peer is also closed we can free resources for both 1353 * sides, otherwise we leave our side intact to deal with any 1354 * races (since we only have the slock). 1355 */ 1356 if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) { 1357 cpipe->pipe_peer = NULL; 1358 ppipe->pipe_peer = NULL; 1359 ppipe->pipe_slock = NULL; /* we will free the slock */ 1360 pipeclose(ppipe); 1361 ppipe = NULL; 1362 } 1363 1364 lwkt_reltoken(&cpipe_rlock); 1365 lwkt_reltoken(&cpipe_wlock); 1366 if (cpipe->pipe_slock) 1367 lockmgr(cpipe->pipe_slock, LK_RELEASE); 1368 1369 /* 1370 * If we disassociated from our peer we can free resources 1371 */ 1372 if (ppipe == NULL) { 1373 gd = mycpu; 1374 if (cpipe->pipe_slock) { 1375 kfree(cpipe->pipe_slock, M_PIPE); 1376 cpipe->pipe_slock = NULL; 1377 } 1378 if (gd->gd_pipeqcount >= pipe_maxcache || 1379 cpipe->pipe_buffer.size != PIPE_SIZE 1380 ) { 1381 pipe_free_kmem(cpipe); 1382 kfree(cpipe, M_PIPE); 1383 } else { 1384 cpipe->pipe_state = 0; 1385 cpipe->pipe_peer = gd->gd_pipeq; 1386 gd->gd_pipeq = cpipe; 1387 ++gd->gd_pipeqcount; 1388 } 1389 } 1390 } 1391 1392 /* 1393 * MPALMOSTSAFE - acquires mplock 1394 */ 1395 static int 1396 pipe_kqfilter(struct file *fp, struct knote *kn) 1397 { 1398 struct pipe *cpipe; 1399 1400 get_mplock(); 1401 cpipe = (struct pipe *)kn->kn_fp->f_data; 1402 1403 switch (kn->kn_filter) { 1404 case EVFILT_READ: 1405 kn->kn_fop = &pipe_rfiltops; 1406 break; 1407 case EVFILT_WRITE: 1408 kn->kn_fop = &pipe_wfiltops; 1409 cpipe = cpipe->pipe_peer; 1410 if (cpipe == NULL) { 1411 /* other end of pipe has been closed */ 1412 rel_mplock(); 1413 return (EPIPE); 1414 } 1415 break; 1416 default: 1417 return (1); 1418 } 1419 kn->kn_hook = (caddr_t)cpipe; 1420 1421 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext); 1422 rel_mplock(); 1423 return (0); 1424 } 1425 1426 static void 1427 filt_pipedetach(struct knote *kn) 1428 { 1429 struct pipe *cpipe = (struct pipe *)kn->kn_hook; 1430 1431 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext); 1432 } 1433 1434 /*ARGSUSED*/ 1435 static int 1436 filt_piperead(struct knote *kn, long hint) 1437 { 1438 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data; 1439 1440 kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex; 1441 1442 /* XXX RACE */ 1443 if (rpipe->pipe_state & PIPE_REOF) { 1444 kn->kn_flags |= EV_EOF; 1445 return (1); 1446 } 1447 return (kn->kn_data > 0); 1448 } 1449 1450 /*ARGSUSED*/ 1451 static int 1452 filt_pipewrite(struct knote *kn, long hint) 1453 { 1454 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data; 1455 struct pipe *wpipe = rpipe->pipe_peer; 1456 u_int32_t space; 1457 1458 /* XXX RACE */ 1459 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) { 1460 kn->kn_data = 0; 1461 kn->kn_flags |= EV_EOF; 1462 return (1); 1463 } 1464 space = wpipe->pipe_buffer.windex - 1465 wpipe->pipe_buffer.rindex; 1466 space = wpipe->pipe_buffer.size - space; 1467 kn->kn_data = space; 1468 return (kn->kn_data >= PIPE_BUF); 1469 } 1470