/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.39 2006/06/13 08:12:03 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
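/*
 * Illustrative sketch (not part of the build): how the two modes look
 * from userland.  Writes smaller than PIPE_MINDIRECT land in the kernel
 * buffer; sufficiently large single writes may take the direct path when
 * kern.pipe.dwrite_enable is set.  The exact threshold value comes from
 * <sys/pipe.h> and is not restated here.
 *
 *      int fds[2];
 *      char small[64], big[256 * 1024];
 *
 *      pipe(fds);
 *      write(fds[1], small, sizeof(small));    // buffered "small write" mode
 *      write(fds[1], big, sizeof(big));        // candidate for direct mode
 */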
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
                struct ucred *cred);

static struct fileops pipeops = {
        NULL,   /* port */
        NULL,   /* clone */
        pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
        pipe_stat, pipe_close, pipe_shutdown
};

static void     filt_pipedetach(struct knote *kn);
static int      filt_piperead(struct knote *kn, long hint);
static int      filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
        { 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
        { 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general, though.
 */
#define LIMITPIPEKVA (16*1024*1024)
/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES   32
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_dwrite_enable = 1;      /* 0:copy, 1:kmem/sfbuf 2:force */
static int pipe_dwrite_sfbuf = 1;       /* 0:kmem_map 1:sfbufs 2:sfbufs_dmap */
                                        /* 3:sfbuf_dmap w/ forced invlpg */

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_enable,
        CTLFLAG_RW, &pipe_dwrite_enable, 0, "1:enable/2:force direct writes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_sfbuf,
        CTLFLAG_RW, &pipe_dwrite_sfbuf, 0,
        "(if dwrite_enable) 0:kmem 1:sfbuf 2:sfbuf_dmap 3:sfbuf_dmap_forceinvlpg");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
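/*
 * A minimal userland sketch (assumption: the standard sysctlbyname(3)
 * interface, not something this file provides) showing how the knobs
 * above can be tuned, e.g. to force direct writes for benchmarking:
 *
 *      #include <sys/sysctl.h>
 *
 *      int val = 2;                    // 2 == force direct writes
 *      sysctlbyname("kern.pipe.dwrite_enable", NULL, NULL,
 *                   &val, sizeof(val));
 *
 * The same names are reachable from the shell via sysctl(8), e.g.
 * "sysctl kern.pipe.dwrite_enable=2".
 */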
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline int pipelock (struct pipe *cpipe, int catch);
static __inline void pipeunlock (struct pipe *cpipe);
static __inline void pipeselwakeup (struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer (struct pipe *wpipe, struct uio *uio);
static int pipe_direct_write (struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer (struct pipe *wpipe);
#endif
static int pipespace (struct pipe *cpipe, int size);

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct file *rf, *wf;
        struct pipe *rpipe, *wpipe;
        int fd1, fd2, error;

        KKASSERT(p);

        rpipe = wpipe = NULL;
        if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (ENFILE);
        }

        rpipe->pipe_state |= PIPE_DIRECTOK;
        wpipe->pipe_state |= PIPE_DIRECTOK;

        /*
         * Select the direct-map features to use for this pipe.  Since the
         * sysctl's can change on the fly we record the settings when the
         * pipe is created.
         *
         * Generally speaking the system will default to what we consider
         * to be the best-balanced and most stable option.  Right now this
         * is SFBUF1.  Modes 2 and 3 are considered experimental at the
         * moment.
         */
        wpipe->pipe_feature = PIPE_COPY;
        if (pipe_dwrite_enable) {
                switch(pipe_dwrite_sfbuf) {
                case 0:
                        wpipe->pipe_feature = PIPE_KMEM;
                        break;
                case 1:
                        wpipe->pipe_feature = PIPE_SFBUF1;
                        break;
                case 2:
                case 3:
                        wpipe->pipe_feature = PIPE_SFBUF2;
                        break;
                }
        }
        rpipe->pipe_feature = wpipe->pipe_feature;

        error = falloc(p, &rf, &fd1);
        if (error) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (error);
        }
        uap->sysmsg_fds[0] = fd1;

        /*
         * Warning: once we've gotten past allocation of the fd for the
         * read-side, we can only drop the read side via fdrop() in order
         * to avoid races against processes which manage to dup() the read
         * side while we are blocked trying to allocate the write side.
         */
        rf->f_type = DTYPE_PIPE;
        rf->f_flag = FREAD | FWRITE;
        rf->f_ops = &pipeops;
        rf->f_data = rpipe;
        error = falloc(p, &wf, &fd2);
        if (error) {
                fsetfd(p, NULL, fd1);
                fdrop(rf);
                /* rpipe has been closed by fdrop(). */
                pipeclose(wpipe);
                return (error);
        }
        wf->f_type = DTYPE_PIPE;
        wf->f_flag = FREAD | FWRITE;
        wf->f_ops = &pipeops;
        wf->f_data = wpipe;
        uap->sysmsg_fds[1] = fd2;

        rpipe->pipe_peer = wpipe;
        wpipe->pipe_peer = rpipe;

        fsetfd(p, rf, fd1);
        fsetfd(p, wf, fd2);
        fdrop(rf);
        fdrop(wf);

        return (0);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
        struct vm_object *object;
        caddr_t buffer;
        int npages, error;

        npages = round_page(size) / PAGE_SIZE;
        object = cpipe->pipe_buffer.object;

        /*
         * [re]create the object if necessary and reserve space for it
         * in the kernel_map.  The object and memory are pageable.  On
         * success, free the old resources before assigning the new
         * ones.
         */
        if (object == NULL || object->size != npages) {
                object = vm_object_allocate(OBJT_DEFAULT, npages);
                buffer = (caddr_t) vm_map_min(kernel_map);

                error = vm_map_find(kernel_map, object, 0,
                        (vm_offset_t *) &buffer, size, 1,
                        VM_PROT_ALL, VM_PROT_ALL, 0);

                if (error != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (ENOMEM);
                }
                pipe_free_kmem(cpipe);
                cpipe->pipe_buffer.object = object;
                cpipe->pipe_buffer.buffer = buffer;
                cpipe->pipe_buffer.size = size;
                ++pipe_bkmem_alloc;
        } else {
                ++pipe_bcache_alloc;
        }
        cpipe->pipe_buffer.in = 0;
        cpipe->pipe_buffer.out = 0;
        cpipe->pipe_buffer.cnt = 0;
        return (0);
}
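/*
 * A worked example of the circular buffer bookkeeping initialized above
 * (illustration only; field meanings are taken from pipe_read() and
 * pipe_write() below).  With size = 8, in = 6, out = 4, cnt = 2, the two
 * pending bytes live at offsets 4 and 5.  A 3-byte write copies
 * segsize = size - in = 2 bytes at offsets 6 and 7, then the remaining
 * byte at offset 0; afterwards in = 1 and cnt = 5.  pipe_read() advances
 * 'out' the same way, and both in and out are reset to 0 whenever cnt
 * drains to zero to improve locality.
 */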
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
        globaldata_t gd = mycpu;
        struct pipe *cpipe;
        int error;

        if ((cpipe = gd->gd_pipeq) != NULL) {
                gd->gd_pipeq = cpipe->pipe_peer;
                --gd->gd_pipeqcount;
                cpipe->pipe_peer = NULL;
        } else {
                cpipe = malloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
        }
        *cpipep = cpipe;
        if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
                return (error);
        vfs_timestamp(&cpipe->pipe_ctime);
        cpipe->pipe_atime = cpipe->pipe_ctime;
        cpipe->pipe_mtime = cpipe->pipe_ctime;
        return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
        int error;

        while (cpipe->pipe_state & PIPE_LOCK) {
                cpipe->pipe_state |= PIPE_LWANT;
                error = tsleep(cpipe, (catch ? PCATCH : 0), "pipelk", 0);
                if (error != 0)
                        return (error);
        }
        cpipe->pipe_state |= PIPE_LOCK;
        return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{
        cpipe->pipe_state &= ~PIPE_LOCK;
        if (cpipe->pipe_state & PIPE_LWANT) {
                cpipe->pipe_state &= ~PIPE_LWANT;
                wakeup(cpipe);
        }
}

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
        if (cpipe->pipe_state & PIPE_SEL) {
                cpipe->pipe_state &= ~PIPE_SEL;
                selwakeup(&cpipe->pipe_sel);
        }
        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
                pgsigio(cpipe->pipe_sigio, SIGIO, 0);
        KNOTE(&cpipe->pipe_sel.si_note, 0);
}
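/*
 * Usage pattern for the I/O lock above, as followed throughout this
 * file (descriptive note, not a new mechanism): callers take the lock
 * with pipelock(), drop it with pipeunlock() before any tsleep() that
 * waits for data or space, and re-acquire it afterwards, e.g.:
 *
 *      error = pipelock(rpipe, 1);     // PCATCH: interruptible
 *      ...
 *      pipeunlock(rpipe);
 *      tsleep(rpipe, PCATCH, "piperd", 0);
 *      error = pipelock(rpipe, 1);     // relock and re-test state
 *
 * PIPE_LWANT ensures a blocked locker is woken when the holder exits.
 */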
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        struct pipe *rpipe;
        int error;
        int nread = 0;
        int nbio;
        u_int size;

        get_mplock();
        rpipe = (struct pipe *) fp->f_data;
        ++rpipe->pipe_busy;
        error = pipelock(rpipe, 1);
        if (error)
                goto unlocked_error;

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        while (uio->uio_resid) {
                caddr_t va;

                if (rpipe->pipe_buffer.cnt > 0) {
                        /*
                         * normal pipe buffer receive
                         */
                        size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
                        if (size > rpipe->pipe_buffer.cnt)
                                size = rpipe->pipe_buffer.cnt;
                        if (size > (u_int) uio->uio_resid)
                                size = (u_int) uio->uio_resid;

                        error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
                                        size, uio);
                        if (error)
                                break;

                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
                                rpipe->pipe_buffer.out = 0;

                        rpipe->pipe_buffer.cnt -= size;

                        /*
                         * If there is no more to read in the pipe, reset
                         * its pointers to the beginning.  This improves
                         * cache hit stats.
                         */
                        if (rpipe->pipe_buffer.cnt == 0) {
                                rpipe->pipe_buffer.in = 0;
                                rpipe->pipe_buffer.out = 0;
                        }
                        nread += size;
#ifndef PIPE_NODIRECT
                } else if (rpipe->pipe_kva &&
                           rpipe->pipe_feature == PIPE_KMEM &&
                           (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
                               == PIPE_DIRECTW
                ) {
                        /*
                         * Direct copy using source-side kva mapping
                         */
                        size = rpipe->pipe_map.xio_bytes -
                                rpipe->pipe_buffer.out;
                        if (size > (u_int)uio->uio_resid)
                                size = (u_int)uio->uio_resid;
                        va = (caddr_t)rpipe->pipe_kva +
                                xio_kvaoffset(&rpipe->pipe_map, rpipe->pipe_buffer.out);
                        error = uiomove(va, size, uio);
                        if (error)
                                break;
                        nread += size;
                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
                                rpipe->pipe_state |= PIPE_DIRECTIP;
                                rpipe->pipe_state &= ~PIPE_DIRECTW;
                                /* reset out index for copy mode */
                                rpipe->pipe_buffer.out = 0;
                                wakeup(rpipe);
                        }
                } else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
                           rpipe->pipe_kva &&
                           rpipe->pipe_feature == PIPE_SFBUF2 &&
                           (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
                               == PIPE_DIRECTW
                ) {
                        /*
                         * Direct copy, bypassing a kernel buffer.  We cannot
                         * mess with the direct-write buffer until
                         * PIPE_DIRECTIP is cleared.  In order to prevent
                         * the pipe_write code from racing itself in
                         * direct_write, we set DIRECTIP when we clear
                         * DIRECTW after we have exhausted the buffer.
                         */
                        if (pipe_dwrite_sfbuf == 3)
                                rpipe->pipe_kvamask = 0;
                        pmap_qenter2(rpipe->pipe_kva, rpipe->pipe_map.xio_pages,
                                    rpipe->pipe_map.xio_npages,
                                    &rpipe->pipe_kvamask);
                        size = rpipe->pipe_map.xio_bytes -
                                rpipe->pipe_buffer.out;
                        if (size > (u_int)uio->uio_resid)
                                size = (u_int)uio->uio_resid;
                        va = (caddr_t)rpipe->pipe_kva +
                                xio_kvaoffset(&rpipe->pipe_map, rpipe->pipe_buffer.out);
                        error = uiomove(va, size, uio);
                        if (error)
                                break;
                        nread += size;
                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
                                rpipe->pipe_state |= PIPE_DIRECTIP;
                                rpipe->pipe_state &= ~PIPE_DIRECTW;
                                /* reset out index for copy mode */
                                rpipe->pipe_buffer.out = 0;
                                wakeup(rpipe);
                        }
                } else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
                           rpipe->pipe_feature == PIPE_SFBUF1 &&
                           (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
                               == PIPE_DIRECTW
                ) {
                        /*
                         * Direct copy, bypassing a kernel buffer.  We cannot
                         * mess with the direct-write buffer until
                         * PIPE_DIRECTIP is cleared.  In order to prevent
                         * the pipe_write code from racing itself in
                         * direct_write, we set DIRECTIP when we clear
                         * DIRECTW after we have exhausted the buffer.
                         */
                        error = xio_uio_copy(&rpipe->pipe_map, rpipe->pipe_buffer.out, uio, &size);
                        if (error)
                                break;
                        nread += size;
                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
                                rpipe->pipe_state |= PIPE_DIRECTIP;
                                rpipe->pipe_state &= ~PIPE_DIRECTW;
                                /* reset out index for copy mode */
                                rpipe->pipe_buffer.out = 0;
                                wakeup(rpipe);
                        }
#endif
                } else {
                        /*
                         * detect EOF condition
                         * read returns 0 on EOF, no need to set error
                         */
                        if (rpipe->pipe_state & PIPE_EOF)
                                break;

                        /*
                         * If the "write-side" has been blocked, wake it up now.
                         */
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                wakeup(rpipe);
                        }

                        /*
                         * Break if some data was read.
                         */
                        if (nread > 0)
                                break;

                        /*
                         * Unlock the pipe buffer for our remaining
                         * processing.  We will either break out with an
                         * error or we will sleep and relock to loop.
                         */
                        pipeunlock(rpipe);

                        /*
                         * Handle non-blocking mode operation or
                         * wait for more data.
                         */
                        if (nbio) {
                                error = EAGAIN;
                        } else {
                                rpipe->pipe_state |= PIPE_WANTR;
                                if ((error = tsleep(rpipe, PCATCH|PNORESCHED,
                                    "piperd", 0)) == 0) {
                                        error = pipelock(rpipe, 1);
                                }
                        }
                        if (error)
                                goto unlocked_error;
                }
        }
        pipeunlock(rpipe);

        if (error == 0)
                vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
        --rpipe->pipe_busy;

        /*
         * PIPE_WANT processing only makes sense if pipe_busy is 0.
         */
        if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
                rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
                wakeup(rpipe);
        } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
                /*
                 * Handle write blocking hysteresis.
                 */
                if (rpipe->pipe_state & PIPE_WANTW) {
                        rpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(rpipe);
                }
        }

        if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
                pipeselwakeup(rpipe);
        rel_mplock();
        return (error);
}
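/*
 * Summary of the branches above (descriptive note): which direct-read
 * path runs is fixed at pipe creation time by pipe_feature.  PIPE_KMEM
 * reads through a persistent kernel mapping set up by the writer,
 * PIPE_SFBUF2 maps the source pages on the reader side via
 * pmap_qenter2(), and PIPE_SFBUF1 copies via xio_uio_copy() without a
 * long-lived mapping.  In all cases a drained direct buffer flips
 * DIRECTW -> DIRECTIP and wakes the writer.  From userland the
 * semantics are the usual ones: read() returns 0 at EOF and fails with
 * EAGAIN in non-blocking mode when no data is pending.
 */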
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
        int error;
        u_int size;

        size = (u_int) uio->uio_iov->iov_len;
        if (size > wpipe->pipe_buffer.size)
                size = wpipe->pipe_buffer.size;

        if (uio->uio_segflg == UIO_SYSSPACE) {
                error = xio_init_kbuf(&wpipe->pipe_map, uio->uio_iov->iov_base,
                                        size);
        } else {
                error = xio_init_ubuf(&wpipe->pipe_map, uio->uio_iov->iov_base,
                                        size, XIOF_READ);
        }
        wpipe->pipe_buffer.out = 0;
        if (error)
                return(error);

        /*
         * Create a kernel map for KMEM and SFBUF2 copy modes.  SFBUF2 will
         * map the pages on the target while KMEM maps the pages now.
         */
        switch(wpipe->pipe_feature) {
        case PIPE_KMEM:
        case PIPE_SFBUF2:
                if (wpipe->pipe_kva == NULL) {
                        wpipe->pipe_kva =
                            kmem_alloc_nofault(kernel_map, XIO_INTERNAL_SIZE);
                        wpipe->pipe_kvamask = 0;
                }
                if (wpipe->pipe_feature == PIPE_KMEM) {
                        pmap_qenter(wpipe->pipe_kva, wpipe->pipe_map.xio_pages,
                                    wpipe->pipe_map.xio_npages);
                }
                break;
        default:
                break;
        }

        /*
         * And update the uio data.  The XIO might have loaded fewer bytes
         * than requested so reload 'size'.
         */
        size = wpipe->pipe_map.xio_bytes;
        uio->uio_iov->iov_len -= size;
        uio->uio_iov->iov_base += size;
        if (uio->uio_iov->iov_len == 0)
                uio->uio_iov++;
        uio->uio_resid -= size;
        uio->uio_offset += size;
        return (0);
}
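/*
 * Worked example (illustration only; the numbers are hypothetical): a
 * writer passes a 100KB iovec to a pipe whose buffer size is 16KB.
 * 'size' is clamped to 16KB, that much of the user buffer is wired
 * into pipe_map, and the uio is advanced by however many bytes the XIO
 * actually captured, so the caller's loop in pipe_write() simply comes
 * around again for the remainder.
 */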
/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * Note that in direct mode pipe_buffer.out is used to track the
 * XIO offset.  We are converting the direct mode into buffered mode
 * which changes the meaning of pipe_buffer.out.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
        int size;
        int offset;

        offset = wpipe->pipe_buffer.out;
        size = wpipe->pipe_map.xio_bytes - offset;

        KKASSERT(size <= wpipe->pipe_buffer.size);

        wpipe->pipe_buffer.in = size;
        wpipe->pipe_buffer.out = 0;
        wpipe->pipe_buffer.cnt = size;
        wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);

        xio_copy_xtok(&wpipe->pipe_map, offset, wpipe->pipe_buffer.buffer, size);
        xio_release(&wpipe->pipe_map);
        if (wpipe->pipe_kva) {
                pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES);
                kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE);
                wpipe->pipe_kva = NULL;
        }
}
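/*
 * Direct-write state machine (descriptive note, inferred from the flag
 * manipulation in this file):
 *
 *      (idle)           -> writer sets DIRECTW|DIRECTIP and builds the XIO
 *      DIRECTW|DIRECTIP -> writer clears DIRECTIP: buffer ready for reader
 *      DIRECTW          -> reader drains the XIO, then sets DIRECTIP and
 *                          clears DIRECTW, waking the writer
 *      DIRECTIP         -> writer releases the XIO and clears DIRECTIP
 *
 * If the writer is interrupted by a signal while DIRECTW is still set,
 * pipe_clone_write_buffer() above copies the unread bytes into the
 * ordinary circular buffer and clears both flags.
 */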
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
        int error;

retry:
        while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                wpipe->pipe_state |= PIPE_WANTW;
                error = tsleep(wpipe, PCATCH, "pipdww", 0);
                if (error)
                        goto error2;
                if (wpipe->pipe_state & PIPE_EOF) {
                        error = EPIPE;
                        goto error2;
                }
        }
        KKASSERT(wpipe->pipe_map.xio_bytes == 0);
        if (wpipe->pipe_buffer.cnt > 0) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }

                wpipe->pipe_state |= PIPE_WANTW;
                error = tsleep(wpipe, PCATCH, "pipdwc", 0);
                if (error)
                        goto error2;
                if (wpipe->pipe_state & PIPE_EOF) {
                        error = EPIPE;
                        goto error2;
                }
                goto retry;
        }

        /*
         * Build our direct-write buffer
         */
        wpipe->pipe_state |= PIPE_DIRECTW | PIPE_DIRECTIP;
        error = pipe_build_write_buffer(wpipe, uio);
        if (error)
                goto error1;
        wpipe->pipe_state &= ~PIPE_DIRECTIP;

        /*
         * Wait until the receiver has snarfed the data.  Since we are likely
         * going to sleep we optimize the case and yield synchronously,
         * possibly avoiding the tsleep().
         */
        error = 0;
        while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
                if (wpipe->pipe_state & PIPE_EOF) {
                        pipelock(wpipe, 0);
                        xio_release(&wpipe->pipe_map);
                        if (wpipe->pipe_kva) {
                                pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES);
                                kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE);
                                wpipe->pipe_kva = NULL;
                        }
                        pipeunlock(wpipe);
                        pipeselwakeup(wpipe);
                        error = EPIPE;
                        goto error1;
                }
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                error = tsleep(wpipe, PCATCH|PNORESCHED, "pipdwt", 0);
        }
        pipelock(wpipe, 0);
        if (wpipe->pipe_state & PIPE_DIRECTW) {
                /*
                 * this bit of trickery substitutes a kernel buffer for
                 * the process that might be going away.
                 */
                pipe_clone_write_buffer(wpipe);
                KKASSERT((wpipe->pipe_state & PIPE_DIRECTIP) == 0);
        } else {
                /*
                 * note: The pipe_kva mapping is not qremove'd here.  For
                 * legacy PIPE_KMEM mode this constitutes an improvement
                 * over the original FreeBSD-4 algorithm.  For PIPE_SFBUF2
                 * mode the kva mapping must not be removed to get the
                 * caching benefit.
                 *
                 * For testing purposes we will give the original algorithm
                 * the benefit of the doubt 'what it could have been', and
                 * keep the optimization.
                 */
                KKASSERT(wpipe->pipe_state & PIPE_DIRECTIP);
                xio_release(&wpipe->pipe_map);
                wpipe->pipe_state &= ~PIPE_DIRECTIP;
        }
        pipeunlock(wpipe);
        return (error);

        /*
         * Direct-write error, clear the direct write flags.
         */
error1:
        wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);
        /* fallthrough */

        /*
         * General error, wakeup the other side if it happens to be sleeping.
         */
error2:
        wakeup(wpipe);
        return (error);
}
#endif
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        int error = 0;
        int orig_resid;
        int nbio;
        struct pipe *wpipe, *rpipe;

        get_mplock();
        rpipe = (struct pipe *) fp->f_data;
        wpipe = rpipe->pipe_peer;

        /*
         * detect loss of pipe read side, issue SIGPIPE if lost.
         */
        if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
                rel_mplock();
                return (EPIPE);
        }
        ++wpipe->pipe_busy;

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        /*
         * If it is advantageous to resize the pipe buffer, do
         * so.
         */
        if ((uio->uio_resid > PIPE_SIZE) &&
                (pipe_nbig < pipe_maxbig) &&
                (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) == 0 &&
                (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
                (wpipe->pipe_buffer.cnt == 0)) {

                if ((error = pipelock(wpipe, 1)) == 0) {
                        if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
                                pipe_nbig++;
                        pipeunlock(wpipe);
                }
        }

        /*
         * If an early error occurred unbusy and return, waking up any pending
         * readers.
         */
        if (error) {
                --wpipe->pipe_busy;
                if ((wpipe->pipe_busy == 0) &&
                    (wpipe->pipe_state & PIPE_WANT)) {
                        wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
                        wakeup(wpipe);
                }
                rel_mplock();
                return(error);
        }

        KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

        orig_resid = uio->uio_resid;

        while (uio->uio_resid) {
                int space;

#ifndef PIPE_NODIRECT
                /*
                 * If the transfer is large, we can gain performance if
                 * we do process-to-process copies directly.
                 * If the write is non-blocking, we don't use the
                 * direct write mechanism.
                 *
                 * The direct write mechanism will detect the reader going
                 * away on us.
                 */
                if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT ||
                    pipe_dwrite_enable > 1) &&
                    nbio == 0 &&
                    pipe_dwrite_enable) {
                        error = pipe_direct_write(wpipe, uio);
                        if (error)
                                break;
                        continue;
                }
#endif

                /*
                 * Pipe buffered writes cannot be coincident with
                 * direct writes.  We wait until the currently executing
                 * direct write is completed before we start filling the
                 * pipe buffer.  We break out if a signal occurs or the
                 * reader goes away.
                 */
        retrywrite:
                while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                wakeup(wpipe);
                        }
                        error = tsleep(wpipe, PCATCH, "pipbww", 0);
                        if (wpipe->pipe_state & PIPE_EOF)
                                break;
                        if (error)
                                break;
                }
                if (wpipe->pipe_state & PIPE_EOF) {
                        error = EPIPE;
                        break;
                }

                space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

                /* Writes of size <= PIPE_BUF must be atomic. */
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;
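                /*
                 * Concrete effect of the clamp above (descriptive note):
                 * two writers each doing write(fd, buf, n) with
                 * n <= PIPE_BUF never see their bytes interleaved, which
                 * is the POSIX atomicity guarantee.  If the buffer cannot
                 * take the whole request, 'space' is forced to 0 and the
                 * writer sleeps rather than performing a partial copy;
                 * writes larger than PIPE_BUF may be split and
                 * interleaved.
                 */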
                /*
                 * Write to fill, read size handles write hysteresis.  Also
                 * additional restrictions can cause select-based non-blocking
                 * writes to spin.
                 */
                if (space > 0) {
                        if ((error = pipelock(wpipe, 1)) == 0) {
                                int size;       /* Transfer size */
                                int segsize;    /* first segment to transfer */

                                /*
                                 * It is possible for a direct write to
                                 * slip in on us... handle it here...
                                 */
                                if (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
                                        pipeunlock(wpipe);
                                        goto retrywrite;
                                }
                                /*
                                 * If a process blocked in uiomove, our
                                 * value for space might be bad.
                                 *
                                 * XXX will we be ok if the reader has gone
                                 * away here?
                                 */
                                if (space > wpipe->pipe_buffer.size -
                                    wpipe->pipe_buffer.cnt) {
                                        pipeunlock(wpipe);
                                        goto retrywrite;
                                }

                                /*
                                 * Transfer size is minimum of uio transfer
                                 * and free space in pipe buffer.
                                 */
                                if (space > uio->uio_resid)
                                        size = uio->uio_resid;
                                else
                                        size = space;
                                /*
                                 * First segment to transfer is minimum of
                                 * transfer size and contiguous space in
                                 * pipe buffer.  If first segment to transfer
                                 * is less than the transfer size, we've got
                                 * a wraparound in the buffer.
                                 */
                                segsize = wpipe->pipe_buffer.size -
                                        wpipe->pipe_buffer.in;
                                if (segsize > size)
                                        segsize = size;

                                /* Transfer first segment */

                                error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
                                                segsize, uio);

                                if (error == 0 && segsize < size) {
                                        /*
                                         * Transfer remaining part now, to
                                         * support atomic writes.  Wraparound
                                         * happened.
                                         */
                                        if (wpipe->pipe_buffer.in + segsize !=
                                            wpipe->pipe_buffer.size)
                                                panic("Expected pipe buffer wraparound disappeared");

                                        error = uiomove(&wpipe->pipe_buffer.buffer[0],
                                                        size - segsize, uio);
                                }
                                if (error == 0) {
                                        wpipe->pipe_buffer.in += size;
                                        if (wpipe->pipe_buffer.in >=
                                            wpipe->pipe_buffer.size) {
                                                if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
                                                        panic("Expected wraparound bad");
                                                wpipe->pipe_buffer.in = size - segsize;
                                        }

                                        wpipe->pipe_buffer.cnt += size;
                                        if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
                                                panic("Pipe buffer overflow");

                                }
                                pipeunlock(wpipe);
                        }
                        if (error)
                                break;

                } else {
                        /*
                         * If the "read-side" has been blocked, wake it up now
                         * and yield to let it drain synchronously rather
                         * than block.
                         */
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                wakeup(wpipe);
                        }

                        /*
                         * don't block on non-blocking I/O
                         */
                        if (nbio) {
                                error = EAGAIN;
                                break;
                        }

                        /*
                         * We have no more space and have something to offer,
                         * wake up select/poll.
                         */
                        pipeselwakeup(wpipe);

                        wpipe->pipe_state |= PIPE_WANTW;
                        error = tsleep(wpipe, PCATCH|PNORESCHED, "pipewr", 0);
                        if (error != 0)
                                break;
                        /*
                         * If read side wants to go away, we just issue a signal
                         * to ourselves.
                         */
                        if (wpipe->pipe_state & PIPE_EOF) {
                                error = EPIPE;
                                break;
                        }
                }
        }

        --wpipe->pipe_busy;

        if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
                wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
                wakeup(wpipe);
        } else if (wpipe->pipe_buffer.cnt > 0) {
                /*
                 * If we have put any characters in the buffer, we wake up
                 * the reader.
                 */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
        }

        /*
         * Don't return EPIPE if I/O was successful
         */
        if ((wpipe->pipe_buffer.cnt == 0) &&
            (uio->uio_resid == 0) &&
            (error == EPIPE)) {
                error = 0;
        }

        if (error == 0)
                vfs_timestamp(&wpipe->pipe_mtime);

        /*
         * We have something to offer,
         * wake up select/poll.
         */
        if (wpipe->pipe_buffer.cnt)
                pipeselwakeup(wpipe);
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
        struct pipe *mpipe;
        int error;

        get_mplock();
        mpipe = (struct pipe *)fp->f_data;

        switch (cmd) {
        case FIOASYNC:
                if (*(int *)data) {
                        mpipe->pipe_state |= PIPE_ASYNC;
                } else {
                        mpipe->pipe_state &= ~PIPE_ASYNC;
                }
                error = 0;
                break;
        case FIONREAD:
                if (mpipe->pipe_state & PIPE_DIRECTW) {
                        *(int *)data = mpipe->pipe_map.xio_bytes -
                                        mpipe->pipe_buffer.out;
                } else {
                        *(int *)data = mpipe->pipe_buffer.cnt;
                }
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
                break;
        case FIOGETOWN:
                *(int *)data = fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        case TIOCSPGRP:
                /* This is deprecated, FIOSETOWN should be used instead. */
                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
                break;

        case TIOCGPGRP:
                /* This is deprecated, FIOGETOWN should be used instead. */
                *(int *)data = -fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        default:
                error = ENOTTY;
                break;
        }
        rel_mplock();
        return (error);
}
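/*
 * Userland sketch of the FIONREAD path above (standard ioctl(2) usage,
 * nothing pipe-specific is being invented): it returns the number of
 * bytes immediately available to read(), including bytes pending in a
 * direct write.
 *
 *      int pending;
 *      if (ioctl(fds[0], FIONREAD, &pending) == 0)
 *              printf("%d bytes buffered\n", pending);
 */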
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int revents = 0;

        get_mplock();
        rpipe = (struct pipe *)fp->f_data;
        wpipe = rpipe->pipe_peer;
        if (events & (POLLIN | POLLRDNORM))
                if ((rpipe->pipe_state & PIPE_DIRECTW) ||
                    (rpipe->pipe_buffer.cnt > 0) ||
                    (rpipe->pipe_state & PIPE_EOF))
                        revents |= events & (POLLIN | POLLRDNORM);

        if (events & (POLLOUT | POLLWRNORM))
                if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
                    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
                     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
                        revents |= events & (POLLOUT | POLLWRNORM);

        if ((rpipe->pipe_state & PIPE_EOF) ||
            (wpipe == NULL) ||
            (wpipe->pipe_state & PIPE_EOF))
                revents |= POLLHUP;

        if (revents == 0) {
                if (events & (POLLIN | POLLRDNORM)) {
                        selrecord(curthread, &rpipe->pipe_sel);
                        rpipe->pipe_state |= PIPE_SEL;
                }

                if (events & (POLLOUT | POLLWRNORM)) {
                        selrecord(curthread, &wpipe->pipe_sel);
                        wpipe->pipe_state |= PIPE_SEL;
                }
        }
        rel_mplock();
        return (revents);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
        struct pipe *pipe;

        get_mplock();
        pipe = (struct pipe *)fp->f_data;

        bzero((caddr_t)ub, sizeof(*ub));
        ub->st_mode = S_IFIFO;
        ub->st_blksize = pipe->pipe_buffer.size;
        ub->st_size = pipe->pipe_buffer.cnt;
        if (ub->st_size == 0 && (pipe->pipe_state & PIPE_DIRECTW)) {
                ub->st_size = pipe->pipe_map.xio_bytes -
                                pipe->pipe_buffer.out;
        }
        ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
        ub->st_atimespec = pipe->pipe_atime;
        ub->st_mtimespec = pipe->pipe_mtime;
        ub->st_ctimespec = pipe->pipe_ctime;
        /*
         * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
         * st_flags, st_gen.
         * XXX (st_dev, st_ino) should be unique.
         */
        rel_mplock();
        return (0);
}
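/*
 * Userland view of pipe_stat() above (sketch, standard fstat(2)):
 * st_size reports the bytes currently queued in the pipe, so after an
 * unread 100-byte write one would expect:
 *
 *      struct stat st;
 *      fstat(fds[0], &st);     // st.st_size == 100, st.st_mode has S_IFIFO
 */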
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
        struct pipe *cpipe = (struct pipe *)fp->f_data;

        get_mplock();
        fp->f_ops = &badfileops;
        fp->f_data = NULL;
        funsetown(cpipe->pipe_sigio);
        pipeclose(cpipe);
        rel_mplock();
        return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int error = EPIPE;

        get_mplock();
        rpipe = (struct pipe *)fp->f_data;

        switch(how) {
        case SHUT_RDWR:
        case SHUT_RD:
                if (rpipe) {
                        rpipe->pipe_state |= PIPE_EOF;
                        pipeselwakeup(rpipe);
                        if (rpipe->pipe_busy)
                                wakeup(rpipe);
                        error = 0;
                }
                if (how == SHUT_RD)
                        break;
                /* fall through */
        case SHUT_WR:
                if (rpipe && (wpipe = rpipe->pipe_peer) != NULL) {
                        wpipe->pipe_state |= PIPE_EOF;
                        pipeselwakeup(wpipe);
                        if (wpipe->pipe_busy)
                                wakeup(wpipe);
                        error = 0;
                }
        }
        rel_mplock();
        return (error);
}
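/*
 * Usage sketch for the shutdown support above (both ends of a pipe are
 * opened FREAD|FWRITE here, so shutdown(2) works on them much as it
 * does on sockets; the SHUT_* constants are the standard
 * <sys/socket.h> ones):
 *
 *      shutdown(fds[1], SHUT_WR);      // peer's read() now returns 0 (EOF)
 *
 * Any side still blocked in I/O is woken so it can observe PIPE_EOF.
 */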
static void
pipe_free_kmem(struct pipe *cpipe)
{
        if (cpipe->pipe_buffer.buffer != NULL) {
                if (cpipe->pipe_buffer.size > PIPE_SIZE)
                        --pipe_nbig;
                kmem_free(kernel_map,
                        (vm_offset_t)cpipe->pipe_buffer.buffer,
                        cpipe->pipe_buffer.size);
                cpipe->pipe_buffer.buffer = NULL;
                cpipe->pipe_buffer.object = NULL;
        }
#ifndef PIPE_NODIRECT
        KKASSERT(cpipe->pipe_map.xio_bytes == 0 &&
                cpipe->pipe_map.xio_offset == 0 &&
                cpipe->pipe_map.xio_npages == 0);
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(struct pipe *cpipe)
{
        globaldata_t gd;
        struct pipe *ppipe;

        if (cpipe == NULL)
                return;

        pipeselwakeup(cpipe);

        /*
         * If the other side is blocked, wake it up saying that
         * we want to close it down.
         */
        while (cpipe->pipe_busy) {
                wakeup(cpipe);
                cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
                tsleep(cpipe, 0, "pipecl", 0);
        }

        /*
         * Disconnect from peer
         */
        if ((ppipe = cpipe->pipe_peer) != NULL) {
                pipeselwakeup(ppipe);

                ppipe->pipe_state |= PIPE_EOF;
                wakeup(ppipe);
                KNOTE(&ppipe->pipe_sel.si_note, 0);
                ppipe->pipe_peer = NULL;
        }

        if (cpipe->pipe_kva) {
                pmap_qremove(cpipe->pipe_kva, XIO_INTERNAL_PAGES);
                kmem_free(kernel_map, cpipe->pipe_kva, XIO_INTERNAL_SIZE);
                cpipe->pipe_kva = NULL;
        }

        /*
         * free or cache resources
         */
        gd = mycpu;
        if (gd->gd_pipeqcount >= pipe_maxcache ||
            cpipe->pipe_buffer.size != PIPE_SIZE
        ) {
                pipe_free_kmem(cpipe);
                free(cpipe, M_PIPE);
        } else {
                KKASSERT(cpipe->pipe_map.xio_npages == 0 &&
                        cpipe->pipe_map.xio_bytes == 0 &&
                        cpipe->pipe_map.xio_offset == 0);
                cpipe->pipe_state = 0;
                cpipe->pipe_busy = 0;
                cpipe->pipe_peer = gd->gd_pipeq;
                gd->gd_pipeq = cpipe;
                ++gd->gd_pipeqcount;
        }
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
        struct pipe *cpipe;

        get_mplock();
        cpipe = (struct pipe *)kn->kn_fp->f_data;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &pipe_rfiltops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &pipe_wfiltops;
                cpipe = cpipe->pipe_peer;
                if (cpipe == NULL) {
                        /* other end of pipe has been closed */
                        rel_mplock();
                        return (EPIPE);
                }
                break;
        default:
                rel_mplock();
                return (1);
        }
        kn->kn_hook = (caddr_t)cpipe;

        SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
        rel_mplock();
        return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
        struct pipe *cpipe = (struct pipe *)kn->kn_hook;

        SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
        struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
        struct pipe *wpipe = rpipe->pipe_peer;

        kn->kn_data = rpipe->pipe_buffer.cnt;
        if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) {
                kn->kn_data = rpipe->pipe_map.xio_bytes -
                                rpipe->pipe_buffer.out;
        }

        if ((rpipe->pipe_state & PIPE_EOF) ||
            (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
        struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
        struct pipe *wpipe = rpipe->pipe_peer;

        if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_data = 0;
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
        if (wpipe->pipe_state & PIPE_DIRECTW)
                kn->kn_data = 0;

        return (kn->kn_data >= PIPE_BUF);
}
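/*
 * Userland sketch of the kqueue hooks above (standard kevent(2) usage;
 * the kq descriptor and fds[] are assumed to exist already):
 * EVFILT_READ fires when data or EOF is pending, EVFILT_WRITE when at
 * least PIPE_BUF bytes of space are free in the peer's buffer.
 *
 *      struct kevent kev, ev;
 *      EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *      kevent(kq, NULL, 0, &ev, 1, NULL);      // ev.data == bytes readable
 */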