/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.49 2008/06/05 18:06:32 swildner Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
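/*
 * Illustration (not normative; the actual values of PIPE_MINDIRECT and
 * PIPE_SIZE come from <sys/pipe.h> and are build-dependent): with a
 * hypothetical PIPE_MINDIRECT of 8KB, a 512 byte write is staged through
 * the kernel ring buffer, while a 32KB write from a blocking writer has
 * its source pages wired so the reader can copy from them directly, at
 * most one pipe-buffer-sized chunk at a time.
 */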
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s); these can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;

/*
 * There's a bug in the sfbuf-based direct write code, not yet located.
 * Disable it for now.
 */
static int pipe_dwrite_enable = 0;	/* 0:copy, 1:kmem/sfbuf 2:force */
static int pipe_dwrite_sfbuf = 1;	/* 0:kmem_map 1:sfbufs 2:sfbufs_dmap */
					/* 3:sfbuf_dmap w/ forced invlpg */

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_enable,
	CTLFLAG_RW, &pipe_dwrite_enable, 0, "1:enable/2:force direct writes");
SYSCTL_INT(_kern_pipe, OID_AUTO, dwrite_sfbuf,
	CTLFLAG_RW, &pipe_dwrite_sfbuf, 0,
	"(if dwrite_enable) 0:kmem 1:sfbuf 2:sfbuf_dmap 3:sfbuf_dmap_forceinvlpg");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
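/*
 * Tuning example (illustrative, from a root shell; the knob names follow
 * the SYSCTL definitions above):
 *
 *	sysctl kern.pipe.dwrite_enable=1	# allow direct writes
 *	sysctl kern.pipe.dwrite_sfbuf=1		# use sfbufs for mappings
 *	sysctl kern.pipe.dwrite_enable=2	# force direct writes even
 *						# below PIPE_MINDIRECT
 */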
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline int pipelock (struct pipe *cpipe, int catch);
static __inline void pipeunlock (struct pipe *cpipe);
static __inline void pipeselwakeup (struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer (struct pipe *wpipe, struct uio *uio);
static int pipe_direct_write (struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer (struct pipe *wpipe);
#endif
static int pipespace (struct pipe *cpipe, int size);

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	/*
	 * Select the direct-map features to use for this pipe.  Since the
	 * sysctl's can change on the fly we record the settings when the
	 * pipe is created.
	 *
	 * Generally speaking the system will default to what we consider
	 * to be the best-balanced and most stable option.  Right now this
	 * is SFBUF1.  Modes 2 and 3 are considered experimental at the
	 * moment.
	 */
	wpipe->pipe_feature = PIPE_COPY;
	if (pipe_dwrite_enable) {
		switch(pipe_dwrite_sfbuf) {
		case 0:
			wpipe->pipe_feature = PIPE_KMEM;
			break;
		case 1:
			wpipe->pipe_feature = PIPE_SFBUF1;
			break;
		case 2:
		case 3:
			wpipe->pipe_feature = PIPE_SFBUF2;
			break;
		}
	}
	rpipe->pipe_feature = wpipe->pipe_feature;

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
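/*
 * Userland view of the system call above (illustrative sketch):
 *
 *	int fds[2];
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);		-- fds[1] is the write side
 *		read(fds[0], buf, 1);		-- fds[0] is the read side
 *	}
 */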
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	return (0);
}
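/*
 * Ring buffer sketch (buffered mode; illustrative, e.g. size = 16):
 *
 *	      out           in
 *	       v             v
 *	[ . . D D D D D D D . . . . . . . ]	cnt = 7
 *
 * 'in' leads and 'out' chases; both wrap at pipe_buffer.size, and both
 * are reset to 0 whenever cnt reaches 0 to improve cache locality.
 */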
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	return (0);
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, (catch ? PCATCH : 0), "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}
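/*
 * Typical pattern for the locking helpers above (illustrative):
 *
 *	if ((error = pipelock(cpipe, 1)) != 0)
 *		return (error);		-- interrupted by a signal
 *	... manipulate cpipe->pipe_buffer ...
 *	pipeunlock(cpipe);
 */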
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int nread = 0;
	int nbio;
	u_int size;

	get_mplock();
	rpipe = (struct pipe *)fp->f_data;
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	while (uio->uio_resid) {
		caddr_t va;

		if (rpipe->pipe_buffer.cnt > 0) {
			/*
			 * normal pipe buffer receive
			 */
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int)uio->uio_resid)
				size = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer
					[rpipe->pipe_buffer.out],
					size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		} else if (rpipe->pipe_kva &&
			   rpipe->pipe_feature == PIPE_KMEM &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy using source-side kva mapping
			 */
			size = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
			if (size > (u_int)uio->uio_resid)
				size = (u_int)uio->uio_resid;
			va = (caddr_t)rpipe->pipe_kva +
				xio_kvaoffset(&rpipe->pipe_map,
					      rpipe->pipe_buffer.out);
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				/* reset out index for copy mode */
				rpipe->pipe_buffer.out = 0;
				wakeup(rpipe);
			}
		} else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
			   rpipe->pipe_kva &&
			   rpipe->pipe_feature == PIPE_SFBUF2 &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy, bypassing a kernel buffer.  We cannot
			 * mess with the direct-write buffer until
			 * PIPE_DIRECTIP is cleared.  In order to prevent
			 * the pipe_write code from racing itself in
			 * direct_write, we set DIRECTIP when we clear
			 * DIRECTW after we have exhausted the buffer.
			 */
			if (pipe_dwrite_sfbuf == 3)
				rpipe->pipe_kvamask = 0;
			pmap_qenter2(rpipe->pipe_kva, rpipe->pipe_map.xio_pages,
				     rpipe->pipe_map.xio_npages,
				     &rpipe->pipe_kvamask);
			size = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
			if (size > (u_int)uio->uio_resid)
				size = (u_int)uio->uio_resid;
			va = (caddr_t)rpipe->pipe_kva +
				xio_kvaoffset(&rpipe->pipe_map,
					      rpipe->pipe_buffer.out);
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				/* reset out index for copy mode */
				rpipe->pipe_buffer.out = 0;
				wakeup(rpipe);
			}
		} else if (rpipe->pipe_buffer.out != rpipe->pipe_map.xio_bytes &&
			   rpipe->pipe_feature == PIPE_SFBUF1 &&
			   (rpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP))
			       == PIPE_DIRECTW
		) {
			/*
			 * Direct copy, bypassing a kernel buffer.  We cannot
			 * mess with the direct-write buffer until
			 * PIPE_DIRECTIP is cleared.  In order to prevent
			 * the pipe_write code from racing itself in
			 * direct_write, we set DIRECTIP when we clear
			 * DIRECTW after we have exhausted the buffer.
			 */
			error = xio_uio_copy(&rpipe->pipe_map,
					     rpipe->pipe_buffer.out,
					     uio, &size);
			if (error)
				break;
			nread += size;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out == rpipe->pipe_map.xio_bytes) {
				rpipe->pipe_state |= PIPE_DIRECTIP;
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				/* reset out index for copy mode */
				rpipe->pipe_buffer.out = 0;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining
			 * processing.  We will either break out with an
			 * error or we will sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (nbio) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PCATCH|PNORESCHED,
						    "piperd", 0)) == 0) {
					error = pipelock(rpipe, 1);
				}
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	rel_mplock();
	return (error);
}
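/*
 * Summary of the semantics implemented above: a read on an empty pipe
 * returns 0 (EOF) once PIPE_EOF is set by the writer going away, fails
 * with EAGAIN in non-blocking mode, and otherwise sleeps in "piperd"
 * until data arrives or the pipe closes.
 */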
#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
	int error;
	u_int size;

	size = (u_int)uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	if (uio->uio_segflg == UIO_SYSSPACE) {
		error = xio_init_kbuf(&wpipe->pipe_map, uio->uio_iov->iov_base,
				      size);
	} else {
		error = xio_init_ubuf(&wpipe->pipe_map, uio->uio_iov->iov_base,
				      size, XIOF_READ);
	}
	wpipe->pipe_buffer.out = 0;
	if (error)
		return (error);

	/*
	 * Create a kernel map for KMEM and SFBUF2 copy modes.  SFBUF2 will
	 * map the pages on the target while KMEM maps the pages now.
	 */
	switch(wpipe->pipe_feature) {
	case PIPE_KMEM:
	case PIPE_SFBUF2:
		if (wpipe->pipe_kva == 0) {
			wpipe->pipe_kva =
			    kmem_alloc_nofault(&kernel_map, XIO_INTERNAL_SIZE);
			wpipe->pipe_kvamask = 0;
		}
		if (wpipe->pipe_feature == PIPE_KMEM) {
			pmap_qenter(wpipe->pipe_kva, wpipe->pipe_map.xio_pages,
				    wpipe->pipe_map.xio_npages);
		}
		break;
	default:
		break;
	}

	/*
	 * And update the uio data.  The XIO might have loaded fewer bytes
	 * than requested so reload 'size'.
	 */
	size = wpipe->pipe_map.xio_bytes;
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}
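/*
 * Sizing note (illustrative): given a hypothetical 64KB iovec and a
 * 16KB pipe_buffer.size, the routine above wires at most 16KB per call;
 * pipe_write() loops, building a fresh direct-write buffer for each
 * chunk until uio_resid is exhausted.
 */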
/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * Note that in direct mode pipe_buffer.out is used to track the
 * XIO offset.  We are converting the direct mode into buffered mode
 * which changes the meaning of pipe_buffer.out.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
	int size;
	int offset;

	offset = wpipe->pipe_buffer.out;
	size = wpipe->pipe_map.xio_bytes - offset;

	KKASSERT(size <= wpipe->pipe_buffer.size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);

	xio_copy_xtok(&wpipe->pipe_map, offset, wpipe->pipe_buffer.buffer,
		      size);
	xio_release(&wpipe->pipe_map);
	if (wpipe->pipe_kva) {
		pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES);
		kmem_free(&kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE);
		wpipe->pipe_kva = 0;
	}
}
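/*
 * Direct-write state sketch (derived from the code below and from
 * pipe_read() above):
 *
 *	DIRECTW|DIRECTIP  writer is building the wired-page buffer
 *	DIRECTW           buffer ready; the reader may consume it
 *	DIRECTIP          reader finished; writer is tearing down
 *	(neither)         buffered (PIPE_COPY) operation
 */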
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
	int error;

retry:
	while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdww", 0);
		if (error)
			goto error2;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error2;
		}
	}
	KKASSERT(wpipe->pipe_map.xio_bytes == 0);
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdwc", 0);
		if (error)
			goto error2;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error2;
		}
		goto retry;
	}

	/*
	 * Build our direct-write buffer
	 */
	wpipe->pipe_state |= PIPE_DIRECTW | PIPE_DIRECTIP;
	error = pipe_build_write_buffer(wpipe, uio);
	if (error)
		goto error1;
	wpipe->pipe_state &= ~PIPE_DIRECTIP;

	/*
	 * Wait until the receiver has snarfed the data.  Since we are likely
	 * going to sleep we optimize the case and yield synchronously,
	 * possibly avoiding the tsleep().
	 */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			xio_release(&wpipe->pipe_map);
			if (wpipe->pipe_kva) {
				pmap_qremove(wpipe->pipe_kva,
					     XIO_INTERNAL_PAGES);
				kmem_free(&kernel_map, wpipe->pipe_kva,
					  XIO_INTERNAL_SIZE);
				wpipe->pipe_kva = 0;
			}
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PCATCH|PNORESCHED, "pipdwt", 0);
	}
	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
		KKASSERT((wpipe->pipe_state & PIPE_DIRECTIP) == 0);
	} else {
		/*
		 * note: The pipe_kva mapping is not qremove'd here.  For
		 * legacy PIPE_KMEM mode this constitutes an improvement
		 * over the original FreeBSD-4 algorithm.  For PIPE_SFBUF2
		 * mode the kva mapping must not be removed to get the
		 * caching benefit.
		 *
		 * For testing purposes we will give the original algorithm
		 * the benefit of the doubt 'what it could have been', and
		 * keep the optimization.
		 */
		KKASSERT(wpipe->pipe_state & PIPE_DIRECTIP);
		xio_release(&wpipe->pipe_map);
		wpipe->pipe_state &= ~PIPE_DIRECTIP;
	}
	pipeunlock(wpipe);
	return (error);

	/*
	 * Direct-write error, clear the direct write flags.
	 */
error1:
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTIP);
	/* fallthrough */

	/*
	 * General error, wakeup the other side if it happens to be sleeping.
	 */
error2:
	wakeup(wpipe);
	return (error);
}
#endif
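/*
 * Atomicity note for the writer below (follows the usual POSIX rule):
 * writes whose total size is <= PIPE_BUF are never interleaved with
 * other writes.  E.g. with a PIPE_BUF of 512, two processes each
 * writing 512 byte records always see whole records in the pipe,
 * because such a write refuses partial buffer space (the "space = 0"
 * clamp below).
 */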
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error = 0;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;

	get_mplock();
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		rel_mplock();
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0) &&
	    (error = pipelock(wpipe, 1)) == 0) {
		/*
		 * Recheck after lock.
		 */
		if ((pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) == 0 &&
		    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (wpipe->pipe_buffer.cnt == 0)) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0) {
				++pipe_bigcount;
				pipe_nbig++;
			}
		}
		pipeunlock(wpipe);
	}

	/*
	 * If an early error occurred unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		rel_mplock();
		return (error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If direct process-to-process writes are enabled and
		 * the buffer is large enough, and this is blocking IO,
		 * then use the direct write feature.
		 *
		 * If the pipe was opened at a time when the direct write
		 * feature was not enabled pipe_feature will be set to
		 * PIPE_COPY and we do not use the feature.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT ||
		     pipe_dwrite_enable > 1) &&
		    nbio == 0 && pipe_dwrite_enable &&
		    wpipe->pipe_feature != PIPE_COPY
		) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & (PIPE_DIRECTW|PIPE_DIRECTIP)) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 *
				 * Worked example (illustrative numbers):
				 * size = 10, in = 12, buffer.size = 16
				 * gives segsize = 4; bytes land at [12..15],
				 * the remaining 6 land at [0..5], and 'in'
				 * finishes at 6.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer
						[wpipe->pipe_buffer.in],
						segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					error = uiomove(&wpipe->pipe_buffer.
							buffer[0],
							size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");
				}
				pipeunlock(wpipe);
			}
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now
			 * and yield to let it drain synchronously rather
			 * than block.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (nbio) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH|PNORESCHED, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);
	rel_mplock();
	return (error);
}
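/*
 * Userland example for the ioctl handler below (illustrative; fd is
 * assumed to be the read side of a pipe):
 *
 *	int n;
 *
 *	if (ioctl(fd, FIONREAD, &n) == 0)
 *		-- n bytes can be read without blocking
 */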
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	int error;

	get_mplock();
	mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW) {
			*(int *)data = mpipe->pipe_map.xio_bytes -
					mpipe->pipe_buffer.out;
		} else {
			*(int *)data = mpipe->pipe_buffer.cnt;
		}
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;

	get_mplock();
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);
	}

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	rel_mplock();
	return (revents);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;

	get_mplock();
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	if (ub->st_size == 0 && (pipe->pipe_state & PIPE_DIRECTW)) {
		ub->st_size = pipe->pipe_map.xio_bytes -
				pipe->pipe_buffer.out;
	}
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	rel_mplock();
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
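/*
 * Userland note (illustrative): because fo_shutdown is implemented,
 * shutdown(2) can be applied to a pipe descriptor here, e.g.
 *
 *	shutdown(fds[1], SHUT_WR);	-- reads on fds[0] now return EOF
 */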
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;

	get_mplock();
	rpipe = (struct pipe *)fp->f_data;

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		if (rpipe) {
			rpipe->pipe_state |= PIPE_EOF;
			pipeselwakeup(rpipe);
			if (rpipe->pipe_busy)
				wakeup(rpipe);
			error = 0;
		}
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		if (rpipe && (wpipe = rpipe->pipe_peer) != NULL) {
			wpipe->pipe_state |= PIPE_EOF;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_busy)
				wakeup(wpipe);
			error = 0;
		}
	}
	rel_mplock();
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--pipe_nbig;
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
#ifndef PIPE_NODIRECT
	KKASSERT(cpipe->pipe_map.xio_bytes == 0 &&
		cpipe->pipe_map.xio_offset == 0 &&
		cpipe->pipe_map.xio_npages == 0);
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		tsleep(cpipe, 0, "pipecl", 0);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}

	if (cpipe->pipe_kva) {
		pmap_qremove(cpipe->pipe_kva, XIO_INTERNAL_PAGES);
		kmem_free(&kernel_map, cpipe->pipe_kva, XIO_INTERNAL_SIZE);
		cpipe->pipe_kva = 0;
	}

	/*
	 * free or cache resources
	 */
	gd = mycpu;
	if (gd->gd_pipeqcount >= pipe_maxcache ||
	    cpipe->pipe_buffer.size != PIPE_SIZE
	) {
		pipe_free_kmem(cpipe);
		kfree(cpipe, M_PIPE);
	} else {
		KKASSERT(cpipe->pipe_map.xio_npages == 0 &&
			cpipe->pipe_map.xio_bytes == 0 &&
			cpipe->pipe_map.xio_offset == 0);
		cpipe->pipe_state = 0;
		cpipe->pipe_busy = 0;
		cpipe->pipe_peer = gd->gd_pipeq;
		gd->gd_pipeq = cpipe;
		++gd->gd_pipeqcount;
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		/* release the mplock before bailing out */
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}
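/*
 * Userland registration example for the filters below (illustrative;
 * kq is assumed to come from kqueue()):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	-- fires when data arrives
 */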
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) {
		kn->kn_data = rpipe->pipe_map.xio_bytes -
				rpipe->pipe_buffer.out;
	}

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}