1 /* 2 * Copyright (c) 1982, 1986, 1989, 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * sendfile(2) and related extensions: 6 * Copyright (c) 1998, David Greenman. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94 37 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $ 38 * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.17 2003/10/03 00:04:04 daver Exp $ 39 */ 40 41 #include "opt_compat.h" 42 #include "opt_ktrace.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/kernel.h> 47 #include <sys/sysproto.h> 48 #include <sys/malloc.h> 49 #include <sys/filedesc.h> 50 #include <sys/event.h> 51 #include <sys/proc.h> 52 #include <sys/fcntl.h> 53 #include <sys/file.h> 54 #include <sys/filio.h> 55 #include <sys/kern_syscall.h> 56 #include <sys/mbuf.h> 57 #include <sys/protosw.h> 58 #include <sys/socket.h> 59 #include <sys/socketvar.h> 60 #include <sys/signalvar.h> 61 #include <sys/uio.h> 62 #include <sys/vnode.h> 63 #include <sys/lock.h> 64 #include <sys/mount.h> 65 #ifdef KTRACE 66 #include <sys/ktrace.h> 67 #endif 68 #include <vm/vm.h> 69 #include <vm/vm_object.h> 70 #include <vm/vm_page.h> 71 #include <vm/vm_pageout.h> 72 #include <vm/vm_kern.h> 73 #include <vm/vm_extern.h> 74 #include <sys/file2.h> 75 76 #if defined(COMPAT_43) 77 #include <emulation/43bsd/43bsd_socket.h> 78 #endif /* COMPAT_43 */ 79 80 static void sf_buf_init(void *arg); 81 SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL) 82 83 static int do_sendfile(struct sendfile_args *uap, int compat); 84 85 static SLIST_HEAD(, sf_buf) sf_freelist; 86 static vm_offset_t sf_base; 87 static struct sf_buf *sf_bufs; 88 static int sf_buf_alloc_want; 89 90 /* 91 * System call interface to the socket abstraction. 
 */
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#define COMPAT_OLDSOCK
#endif

extern struct fileops socketops;

/*
 * socket_args(int domain, int type, int protocol)
 *
 * Create an endpoint for communication; the new descriptor is returned
 * in uap->sysmsg_result.
 */
int
socket(struct socket_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct socket *so;
	struct file *fp;
	int fd, error;

	KKASSERT(p);
	fdp = p->p_fd;

	/* Allocate a descriptor first; falloc() installs it in the table. */
	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fhold(fp);	/* extra reference for our local use of fp */
	error = socreate(uap->domain, &so, uap->type, uap->protocol, td);
	if (error) {
		/*
		 * Back the descriptor out, but only if nobody raced in
		 * and replaced it while socreate() was blocked.
		 */
		if (fdp->fd_ofiles[fd] == fp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(fp, td);
		}
	} else {
		fp->f_data = (caddr_t)so;
		fp->f_flag = FREAD|FWRITE;
		fp->f_ops = &socketops;
		fp->f_type = DTYPE_SOCKET;
		uap->sysmsg_result = fd;
	}
	fdrop(fp, td);	/* release our local reference */
	return (error);
}

/*
 * Bind the socket behind descriptor s to address sa.  The caller owns
 * sa and remains responsible for freeing it.
 */
int
kern_bind(int s, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = sobind((struct socket *)fp->f_data, sa, td);
	fdrop(fp, td);
	return (error);
}

/*
 * bind_args(int s, caddr_t name, int namelen)
 *
 * Copy the user-supplied address in and hand it to kern_bind().
 */
int
bind(struct bind_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_bind(uap->s, sa);
	FREE(sa, M_SONAME);

	return (error);
}

/*
 * Mark the socket behind descriptor s as accepting connections, with a
 * pending-connection backlog of 'backlog'.
 */
int
kern_listen(int s, int backlog)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = solisten((struct socket *)fp->f_data, backlog, td);
	fdrop(fp, td);
	return(error);
}

/*
 * listen_args(int s, int backlog)
 */
int
listen(struct listen_args *uap)
{
	int error;

	error = kern_listen(uap->s, uap->backlog);
	return (error);
}

/*
 * The second argument to kern_accept() is a handle to a struct sockaddr.
 * This allows kern_accept() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_accept(int s, struct sockaddr **name, int *namelen, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct file *lfp = NULL;	/* listening socket's file */
	struct file *nfp = NULL;	/* newly accepted socket's file */
	struct sockaddr *sa;
	int error, s1;
	struct socket *head, *so;
	int fd;
	u_int fflag;		/* type must match fp->f_flag */
	int tmp;

	if (name && namelen && *namelen < 0)
		return (EINVAL);

	error = holdsock(fdp, s, &lfp);
	if (error)
		return (error);
	/* Network interrupts must be blocked while we scan so_comp. */
	s1 = splnet();
	head = (struct socket *)lfp->f_data;
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		splx(s1);
		error = EINVAL;
		goto done;
	}
	/* Wait for a completed connection, honoring non-blocking mode. */
	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		if ((head->so_state & SS_NBIO) != 0) {
			head->so_error = EWOULDBLOCK;
			break;
		}
		error = tsleep((caddr_t)&head->so_timeo, PCATCH, "accept", 0);
		if (error) {
			splx(s1);
			goto done;
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		splx(s1);
		goto done;
	}

	/*
	 * At this point we know that there is at least one connection
	 * ready to be accepted. Remove it from the queue prior to
	 * allocating the file descriptor for it since falloc() may
	 * block allowing another process to accept the connection
	 * instead.
	 */
	so = TAILQ_FIRST(&head->so_comp);
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;

	fflag = lfp->f_flag;
	error = falloc(p, &nfp, &fd);
	if (error) {
		/*
		 * Probably ran out of file descriptors. Put the
		 * unaccepted connection back onto the queue and
		 * do another wakeup so some other process might
		 * have a chance at it.
		 */
		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
		head->so_qlen++;
		wakeup_one(&head->so_timeo);
		splx(s1);
		goto done;
	}
	fhold(nfp);	/* extra ref for our local use, dropped at done: */
	*res = fd;

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.sb_sel.si_note, 0);

	so->so_state &= ~SS_COMP;
	so->so_head = NULL;
	/* The new socket inherits the listener's SIGIO/SIGURG ownership. */
	if (head->so_sigio != NULL)
		fsetown(fgetown(head->so_sigio), &so->so_sigio);

	nfp->f_data = (caddr_t)so;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_type = DTYPE_SOCKET;
	/* Sync socket nonblocking/async state with file flags */
	tmp = fflag & FNONBLOCK;
	(void) fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td);
	tmp = fflag & FASYNC;
	(void) fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td);

	sa = NULL;
	error = soaccept(so, &sa);

	/*
	 * Set the returned name and namelen as applicable.  Set the returned
	 * namelen to 0 for older code which might ignore the return value
	 * from accept.
	 */
	if (error == 0) {
		if (sa && name && namelen) {
			if (*namelen > sa->sa_len)
				*namelen = sa->sa_len;
			*name = sa;
		} else {
			if (sa)
				FREE(sa, M_SONAME);
		}
	}

	/*
	 * close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.  Note that *res is normally ignored if an
	 * error is returned but a syscall message will still have access
	 * to the result code.
	 */
	if (error) {
		*res = -1;
		if (fdp->fd_ofiles[fd] == nfp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(nfp, td);
		}
	}
	splx(s1);

	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp, td);
	fdrop(lfp, td);
	return (error);
}

/*
 * accept_args(int s, caddr_t name, int *anamelen)
 *
 * Accept a connection, optionally copying the peer address out to
 * userland.  The new descriptor is returned in uap->sysmsg_result.
 */
int
accept(struct accept_args *uap)
{
	struct sockaddr *sa = NULL;
	int sa_len;
	int error;

	if (uap->name) {
		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
		if (error)
			return (error);

		error = kern_accept(uap->s, &sa, &sa_len, &uap->sysmsg_result);

		if (error == 0)
			error = copyout(sa, uap->name, sa_len);
		if (error == 0) {
			error = copyout(&sa_len, uap->anamelen,
			    sizeof(*uap->anamelen));
		}
		if (sa)
			FREE(sa, M_SONAME);
	} else {
		error = kern_accept(uap->s, NULL, 0, &uap->sysmsg_result);
	}
	return (error);
}

/*
 * Connect the socket behind descriptor s to address sa.  Blocks until
 * the connection completes unless the socket is non-blocking, in which
 * case EINPROGRESS is returned while the connection proceeds.
 */
int
kern_connect(int s, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	so = (struct socket *)fp->f_data;
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EALREADY;
		goto done;
	}
	error = soconnect(so, sa, td);
	if (error)
		goto bad;
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto done;
	}
	/* NOTE: 's' (the descriptor) is reused here to hold the spl level. */
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		error = tsleep((caddr_t)&so->so_timeo, PCATCH, "connec", 0);
		if (error)
			break;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
	splx(s);
bad:
	so->so_state &= ~SS_ISCONNECTING;
	if (error == ERESTART)
		error = EINTR;
done:
	fdrop(fp, td);
	return (error);
}

/*
 * connect_args(int s, caddr_t name, int namelen)
 */
int
connect(struct connect_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_connect(uap->s, sa);
	FREE(sa, M_SONAME);

	return (error);
}

/*
 * Create a pair of connected sockets and return the two descriptors
 * in sv[0] and sv[1].
 */
int
kern_socketpair(int domain, int type, int protocol, int *sv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp1, *fp2;
	struct socket *so1, *so2;
	int fd, error;

	KKASSERT(p);
	fdp = p->p_fd;
	error = socreate(domain, &so1, type, protocol, td);
	if (error)
		return (error);
	error = socreate(domain, &so2, type, protocol, td);
	if (error)
		goto free1;
	error = falloc(p, &fp1, &fd);
	if (error)
		goto free2;
	fhold(fp1);	/* local reference, dropped before return */
	sv[0] = fd;
	fp1->f_data = (caddr_t)so1;
	error = falloc(p, &fp2, &fd);
	if (error)
		goto free3;
	fhold(fp2);	/* local reference, dropped before return */
	fp2->f_data = (caddr_t)so2;
	sv[1] = fd;
	error = soconnect2(so1, so2);
	if (error)
		goto free4;
	if (type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
		if (error)
			goto free4;
	}
	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
	fp1->f_ops = fp2->f_ops = &socketops;
	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
	fdrop(fp1, td);
	fdrop(fp2, td);
	return (error);
	/*
	 * Unwind in reverse order of construction.  Each descriptor is
	 * removed from the table (if still ours) and then the local
	 * fhold() reference is dropped.
	 */
free4:
	if (fdp->fd_ofiles[sv[1]] == fp2) {
		fdp->fd_ofiles[sv[1]] = NULL;
		fdrop(fp2, td);
	}
	fdrop(fp2, td);
free3:
	if (fdp->fd_ofiles[sv[0]] == fp1) {
		fdp->fd_ofiles[sv[0]] = NULL;
		fdrop(fp1, td);
	}
	fdrop(fp1, td);
free2:
	(void)soclose(so2);
free1:
	(void)soclose(so1);
	return (error);
}

/*
 * socketpair(int domain, int type, int protocol, int *rsv)
 */
int
socketpair(struct socketpair_args *uap)
{
	int error, sockv[2];

	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);

	if (error == 0)
		error = copyout(sockv, uap->rsv, sizeof(sockv));
	return (error);
}

/*
 * Common backend for sendto()/sendmsg().  'sa' (may be NULL) is the
 * destination address, 'control' (may be NULL) is an mbuf of control
 * data whose ownership passes to the protocol.  On success the number
 * of bytes sent is stored in *res.
 */
int
kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
    struct mbuf *control, int flags, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int len, error;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
#ifdef KTRACE
	/* Snapshot the iovec before pru_sosend() consumes the uio. */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	so = (struct socket *)fp->f_data;
	error = so->so_proto->pr_usrreqs->pru_sosend(so, sa, auio, NULL,
	    control, flags, td);
	if (error) {
		/* A partial write before interruption counts as success. */
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE)
			psignal(p, SIGPIPE);	/* historical write-to-broken-pipe signal */
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(p->p_tracep, s, UIO_WRITE, &ktruio, error);
		}
		FREE(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;	/* bytes actually sent */
	fdrop(fp, td);
	return (error);
}

/*
 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
 *
 * Single-buffer send with an optional destination address.
 */
int
sendto(struct sendto_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	struct sockaddr *sa = NULL;
	int error;

	if (uap->to) {
		error = getsockaddr(&sa, uap->to, uap->tolen);
		if (error)
			return (error);
	}
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
	    &uap->sysmsg_result);

	if (sa)
		FREE(sa, M_SONAME);
	return (error);
}

/*
 * sendmsg_args(int s, caddr_t msg, int flags)
 *
 * Scatter/gather send with optional address and control data.
 */
int
sendmsg(struct sendmsg_args *uap)
{
	struct thread *td = curthread;
	struct msghdr msg;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL, *iovp;
	struct sockaddr *sa = NULL;
	struct mbuf *control = NULL;
	int error, i;

	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
	if (error)
		return (error);

	/*
	 * Conditionally copyin msg.msg_name.
	 */
	if (msg.msg_name) {
		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
		if (error)
			return (error);
	}

	/*
	 * Populate auio.
638 */ 639 if (msg.msg_iovlen >= UIO_MAXIOV) { 640 error = EMSGSIZE; 641 goto cleanup; 642 } 643 if (msg.msg_iovlen >= UIO_SMALLIOV) { 644 MALLOC(iov, struct iovec *, 645 sizeof(struct iovec) * msg.msg_iovlen, M_IOV, M_WAITOK); 646 } else { 647 iov = aiov; 648 } 649 error = copyin(msg.msg_iov, iov, msg.msg_iovlen * sizeof(struct iovec)); 650 if (error) 651 goto cleanup; 652 auio.uio_iov = iov; 653 auio.uio_iovcnt = msg.msg_iovlen; 654 auio.uio_offset = 0; 655 auio.uio_resid = 0; 656 for (i = 0, iovp = auio.uio_iov; i < msg.msg_iovlen; i++, iovp++) { 657 auio.uio_resid += iovp->iov_len; 658 if (auio.uio_resid < 0) { 659 error = EINVAL; 660 goto cleanup; 661 } 662 } 663 auio.uio_segflg = UIO_USERSPACE; 664 auio.uio_rw = UIO_WRITE; 665 auio.uio_td = td; 666 667 /* 668 * Conditionally copyin msg.msg_control. 669 */ 670 if (msg.msg_control) { 671 if (msg.msg_controllen < sizeof(struct cmsghdr) || 672 msg.msg_controllen > MLEN) { 673 error = EINVAL; 674 goto cleanup; 675 } 676 control = m_get(M_WAIT, MT_CONTROL); 677 if (control == NULL) { 678 error = ENOBUFS; 679 goto cleanup; 680 } 681 control->m_len = msg.msg_controllen; 682 error = copyin(msg.msg_control, mtod(control, caddr_t), 683 msg.msg_controllen); 684 if (error) { 685 m_free(control); 686 goto cleanup; 687 } 688 } 689 690 error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags, 691 &uap->sysmsg_result); 692 693 cleanup: 694 if (sa) 695 FREE(sa, M_SONAME); 696 if (iov != aiov) 697 FREE(iov, M_IOV); 698 return (error); 699 } 700 701 /* 702 * kern_recvmsg() takes a handle to sa and control. If the handle is non- 703 * null, it returns a dynamically allocated struct sockaddr and an mbuf. 704 * Don't forget to FREE() and m_free() these if they are returned. 
705 */ 706 int 707 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio, 708 struct mbuf **control, int *flags, int *res) 709 { 710 struct thread *td = curthread; 711 struct proc *p = td->td_proc; 712 struct file *fp; 713 int len, error; 714 struct socket *so; 715 #ifdef KTRACE 716 struct iovec *ktriov = NULL; 717 struct uio ktruio; 718 #endif 719 720 error = holdsock(p->p_fd, s, &fp); 721 if (error) 722 return (error); 723 #ifdef KTRACE 724 if (KTRPOINT(td, KTR_GENIO)) { 725 int iovlen = auio->uio_iovcnt * sizeof (struct iovec); 726 727 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK); 728 bcopy(auio->uio_iov, ktriov, iovlen); 729 ktruio = *auio; 730 } 731 #endif 732 len = auio->uio_resid; 733 so = (struct socket *)fp->f_data; 734 error = so->so_proto->pr_usrreqs->pru_soreceive(so, sa, auio, NULL, 735 control, flags); 736 if (error) { 737 if (auio->uio_resid != len && (error == ERESTART || 738 error == EINTR || error == EWOULDBLOCK)) 739 error = 0; 740 } 741 #ifdef KTRACE 742 if (ktriov != NULL) { 743 if (error == 0) { 744 ktruio.uio_iov = ktriov; 745 ktruio.uio_resid = len - auio->uio_resid; 746 ktrgenio(p->p_tracep, s, UIO_READ, &ktruio, error); 747 } 748 FREE(ktriov, M_TEMP); 749 } 750 #endif 751 if (error == 0) 752 *res = len - auio->uio_resid; 753 fdrop(fp, td); 754 return (error); 755 } 756 757 /* 758 * recvfrom_args(int s, caddr_t buf, size_t len, int flags, 759 * caddr_t from, int *fromlenaddr) 760 */ 761 int 762 recvfrom(struct recvfrom_args *uap) 763 { 764 struct thread *td = curthread; 765 struct uio auio; 766 struct iovec aiov; 767 struct sockaddr *sa = NULL; 768 int error, fromlen; 769 770 if (uap->from && uap->fromlenaddr) { 771 error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen)); 772 if (error) 773 return (error); 774 if (fromlen < 0) 775 return (EINVAL); 776 } else { 777 fromlen = 0; 778 } 779 aiov.iov_base = uap->buf; 780 aiov.iov_len = uap->len; 781 auio.uio_iov = &aiov; 782 auio.uio_iovcnt = 1; 783 auio.uio_offset = 0; 784 
auio.uio_resid = uap->len; 785 auio.uio_segflg = UIO_USERSPACE; 786 auio.uio_rw = UIO_READ; 787 auio.uio_td = td; 788 789 error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL, 790 &uap->flags, &uap->sysmsg_result); 791 792 if (error == 0 && uap->from) { 793 fromlen = MIN(fromlen, sa->sa_len); 794 error = copyout(sa, uap->from, fromlen); 795 if (error == 0) 796 error = copyout(&fromlen, uap->fromlenaddr, 797 sizeof(fromlen)); 798 } 799 if (sa) 800 FREE(sa, M_SONAME); 801 802 return (error); 803 } 804 805 /* 806 * recvmsg_args(int s, struct msghdr *msg, int flags) 807 */ 808 int 809 recvmsg(struct recvmsg_args *uap) 810 { 811 struct thread *td = curthread; 812 struct msghdr msg; 813 struct uio auio; 814 struct iovec aiov[UIO_SMALLIOV], *iov = NULL, *iovp; 815 struct mbuf *m, *control; 816 struct sockaddr *sa = NULL; 817 caddr_t ctlbuf; 818 socklen_t *ufromlenp, *ucontrollenp; 819 int error, fromlen, controllen, len, i, flags, *uflagsp; 820 821 /* 822 * This copyin handles everything except the iovec. 823 */ 824 error = copyin(uap->msg, &msg, sizeof(msg)); 825 if (error) 826 return (error); 827 828 if (msg.msg_name && msg.msg_namelen < 0) 829 return (EINVAL); 830 if (msg.msg_control && msg.msg_controllen < 0) 831 return (EINVAL); 832 833 ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr, 834 msg_namelen)); 835 ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr, 836 msg_controllen)); 837 uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr, 838 msg_flags)); 839 840 /* 841 * Populate auio. 
	 */
	if (msg.msg_iovlen >= UIO_MAXIOV)
		return (EMSGSIZE);
	/* Use the on-stack iovec array when it is big enough. */
	if (msg.msg_iovlen >= UIO_SMALLIOV) {
		MALLOC(iov, struct iovec *,
		    sizeof(struct iovec) * msg.msg_iovlen, M_IOV, M_WAITOK);
	} else {
		iov = aiov;
	}
	error = copyin(msg.msg_iov, iov, msg.msg_iovlen * sizeof(struct iovec));
	if (error)
		goto cleanup;
	auio.uio_iov = iov;
	auio.uio_iovcnt = msg.msg_iovlen;
	auio.uio_offset = 0;
	auio.uio_resid = 0;
	for (i = 0, iovp = auio.uio_iov; i < msg.msg_iovlen; i++, iovp++) {
		/* Guard against total-length overflow of uio_resid. */
		auio.uio_resid += iovp->iov_len;
		if (auio.uio_resid < 0) {
			error = EINVAL;
			goto cleanup;
		}
	}
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	flags = msg.msg_flags;

	error = kern_recvmsg(uap->s, msg.msg_name ? &sa : NULL, &auio,
	    msg.msg_control ? &control : NULL, &flags, &uap->sysmsg_result);

	/*
	 * Conditionally copyout the name and populate the namelen field.
	 */
	if (error == 0 && msg.msg_name) {
		fromlen = MIN(msg.msg_namelen, sa->sa_len);
		error = copyout(sa, msg.msg_name, fromlen);
		if (error == 0)
			error = copyout(&fromlen, ufromlenp,
			    sizeof(*ufromlenp));
	}

	/*
	 * Copyout msg.msg_control and msg.msg_controllen.
887 */ 888 if (error == 0 && msg.msg_control) { 889 len = msg.msg_controllen; 890 m = control; 891 ctlbuf = (caddr_t)msg.msg_control; 892 893 while(m && len > 0) { 894 unsigned int tocopy; 895 896 if (len >= m->m_len) { 897 tocopy = m->m_len; 898 } else { 899 msg.msg_flags |= MSG_CTRUNC; 900 tocopy = len; 901 } 902 903 error = copyout(mtod(m, caddr_t), ctlbuf, tocopy); 904 if (error) 905 goto cleanup; 906 907 ctlbuf += tocopy; 908 len -= tocopy; 909 m = m->m_next; 910 } 911 controllen = ctlbuf - (caddr_t)msg.msg_control; 912 error = copyout(&controllen, ucontrollenp, 913 sizeof(*ucontrollenp)); 914 } 915 916 if (error == 0) 917 error = copyout(&flags, uflagsp, sizeof(*uflagsp)); 918 919 cleanup: 920 if (sa) 921 FREE(sa, M_SONAME); 922 if (iov != aiov) 923 FREE(iov, M_IOV); 924 if (control) 925 m_freem(control); 926 return (error); 927 } 928 929 /* 930 * shutdown_args(int s, int how) 931 */ 932 /* ARGSUSED */ 933 int 934 shutdown(struct shutdown_args *uap) 935 { 936 struct thread *td = curthread; 937 struct proc *p = td->td_proc; 938 struct file *fp; 939 int error; 940 941 KKASSERT(p); 942 error = holdsock(p->p_fd, uap->s, &fp); 943 if (error) 944 return (error); 945 error = soshutdown((struct socket *)fp->f_data, uap->how); 946 fdrop(fp, td); 947 return(error); 948 } 949 950 /* 951 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an 952 * in kernel pointer instead of a userland pointer. This allows us 953 * to manipulate socket options in the emulation code. 
 */
int
kern_setsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	/* A value length without a value buffer makes no sense. */
	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_valsize < 0)
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sosetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp, td);
	return (error);
}

/*
 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
 */
int
setsockopt(struct setsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = uap->valsize;
	sopt.sopt_td = td;	/* non-NULL: sopt_val is a userland pointer */

	error = kern_setsockopt(uap->s, &sopt);
	return(error);
}

/*
 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
 * in kernel pointer instead of a userland pointer.  This allows us
 * to manipulate socket options in the emulation code.
 */
int
kern_getsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	/* A value length without a value buffer makes no sense. */
	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_valsize < 0)
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sogetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp, td);
	return (error);
}

/*
 * getsockopt_Args(int s, int level, int name, caddr_t val, int *avalsize)
 */
int
getsockopt(struct getsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error, valsize;

	if (uap->val) {
		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
		if (error)
			return (error);
		if (valsize < 0)
			return (EINVAL);
	} else {
		valsize = 0;
	}

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = valsize;
	sopt.sopt_td = td;	/* non-NULL: sopt_val is a userland pointer */

	error = kern_getsockopt(uap->s, &sopt);
	if (error == 0) {
		/* sogetopt() wrote the value directly; report its size. */
		valsize = sopt.sopt_valsize;
		error = copyout(&valsize, uap->avalsize, sizeof(valsize));
	}
	return (error);
}

/*
 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
 * This allows kern_getsockname() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_getsockname(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp, td);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa);
	if (error == 0) {
		if (sa == 0) {
			/* Protocol supplied no address; report zero length. */
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;	/* ownership passes to the caller */
		}
	}

	fdrop(fp, td);
	return (error);
}

/*
 * getsockname_args(int fdes, caddr_t asa, int *alen)
 *
 * Get socket name.
 */
int
getsockname(struct getsockname_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getsockname(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		FREE(sa, M_SONAME);
	return (error);
}

/*
 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
 * This allows kern_getpeername() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_getpeername(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp, td);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	/* A peer only exists on a connected (or confirming) socket. */
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		fdrop(fp, td);
		return (ENOTCONN);
	}
	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa);
	if (error == 0) {
		if (sa == 0) {
			/* Protocol supplied no address; report zero length. */
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;	/* ownership passes to the caller */
		}
	}

	fdrop(fp, td);
	return (error);
}

/*
 * getpeername_args(int fdes, caddr_t asa, int *alen)
 *
 * Get name of peer for connected socket.
 */
int
getpeername(struct getpeername_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getpeername(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		FREE(sa, M_SONAME);
	return (error);
}

/*
 * sockargs() will be removed soon.  It is currently only called from the
 * emulation code.
 *
 * Copy 'buflen' bytes from userland 'buf' into a freshly allocated mbuf
 * of the given type, returning it via *mp on success.
 */
int
sockargs(mp, buf, buflen, type)
	struct mbuf **mp;
	caddr_t buf;
	int buflen, type;
{
	struct sockaddr *sa;
	struct mbuf *m;
	int error;

	if ((u_int)buflen > MLEN) {
#ifdef COMPAT_OLDSOCK
		if (type == MT_SONAME && (u_int)buflen <= 112)
			buflen = MLEN;		/* unix domain compat.
 hack */
		else
#endif
		return (EINVAL);
	}
	m = m_get(M_WAIT, type);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = buflen;
	error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
	if (error)
		(void) m_free(m);
	else {
		*mp = m;
		if (type == MT_SONAME) {
			sa = mtod(m, struct sockaddr *);

#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
			/* Old sockaddrs had sa_family where sa_len now is. */
			if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
				sa->sa_family = sa->sa_len;
#endif
			sa->sa_len = buflen;
		}
	}
	return (error);
}

/*
 * Copy a user-supplied sockaddr of 'len' bytes into a freshly
 * allocated kernel sockaddr returned via *namp.  The caller frees it
 * with FREE(..., M_SONAME).  *namp is NULL on failure.
 */
int
getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
{
	struct sockaddr *sa;
	int error;

	*namp = NULL;
	if (len > SOCK_MAXADDRLEN)
		return ENAMETOOLONG;
	/* Must at least cover the fixed sockaddr header. */
	if (len < offsetof(struct sockaddr, sa_data[0]))
		return EDOM;
	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
	error = copyin(uaddr, sa, len);
	if (error) {
		FREE(sa, M_SONAME);
	} else {
#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
		/* Old sockaddrs had sa_family where sa_len now is. */
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		sa->sa_len = len;
		*namp = sa;
	}
	return error;
}

/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * On success the file is returned with an extra reference held;
 * the caller must fdrop() it.
 */
int
holdsock(fdp, fdes, fpp)
	struct filedesc *fdp;
	int fdes;
	struct file **fpp;
{
	struct file *fp = NULL;
	int error = 0;

	if ((unsigned)fdes >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fdes]) == NULL) {
		error = EBADF;
	} else if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
		fp = NULL;
	} else {
		fhold(fp);
	}
	*fpp = fp;
	return(error);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer.
 :-))
 */
static void
sf_buf_init(void *arg)
{
	int i;

	SLIST_INIT(&sf_freelist);
	/* One page of KVA per sf_buf; physical pages are mapped in on demand. */
	sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
	/*
	 * NOTE(review): M_NOWAIT allocation is not checked for NULL before
	 * the bzero below — relies on this running at boot (SYSINIT) when
	 * the allocation cannot fail.  Confirm before reusing elsewhere.
	 */
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT);
	bzero(sf_bufs, nsfbufs * sizeof(struct sf_buf));
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		SLIST_INSERT_HEAD(&sf_freelist, &sf_bufs[i], free_list);
	}
}

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 * Returns NULL only if the sleep was interrupted by a signal (PCATCH).
 * The returned buffer starts with refcnt == 1.
 */
struct sf_buf *
sf_buf_alloc()
{
	struct sf_buf *sf;
	int s;
	int error;

	/* The freelist is manipulated from interrupt level; block it out. */
	s = splimp();
	while ((sf = SLIST_FIRST(&sf_freelist)) == NULL) {
		sf_buf_alloc_want = 1;
		error = tsleep(&sf_freelist, PCATCH, "sfbufa", 0);
		if (error)
			break;
	}
	if (sf != NULL) {
		SLIST_REMOVE_HEAD(&sf_freelist, free_list);
		sf->refcnt = 1;
	}
	splx(s);
	return (sf);
}

/* Map an sf_buf KVA back to its sf_buf descriptor. */
#define dtosf(x)	(&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])

/*
 * Add a reference to an sf_buf given its KVA.  Used as the mbuf
 * external-storage ext_ref callback ('size' is unused).
 */
void
sf_buf_ref(caddr_t addr, u_int size)
{
	struct sf_buf *sf;

	sf = dtosf(addr);
	if (sf->refcnt == 0)
		panic("sf_buf_ref: referencing a free sf_buf");
	sf->refcnt++;
}

/*
 * Lose a reference to an sf_buf. When none left, detach mapped page
 * and release resources back to the system.
 *
 * Must be called at splimp.
 */
void
sf_buf_free(caddr_t addr, u_int size)
{
	struct sf_buf *sf;
	struct vm_page *m;
	int s;

	sf = dtosf(addr);
	if (sf->refcnt == 0)
		panic("sf_buf_free: freeing free sf_buf");
	sf->refcnt--;
	if (sf->refcnt == 0) {
		/* Unmap the page from the sf_buf's KVA slot. */
		pmap_qremove((vm_offset_t)addr, 1);
		m = sf->m;
		s = splvm();
		vm_page_unwire(m, 0);
		/*
		 * Check for the object going away on us. This can
		 * happen since we don't hold a reference to it.
		 * If so, we're responsible for freeing the page.
		 */
		if (m->wire_count == 0 && m->object == NULL)
			vm_page_free(m);
		splx(s);
		sf->m = NULL;
		SLIST_INSERT_HEAD(&sf_freelist, sf, free_list);
		/* Wake any thread stalled in sf_buf_alloc(). */
		if (sf_buf_alloc_want) {
			sf_buf_alloc_want = 0;
			wakeup(&sf_freelist);
		}
	}
}

/*
 * sendfile(2).
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'. Send only 'nbytes' of the file or until EOF if
 * nbytes == 0. Optionally add a header and/or trailer to the socket
 * output. If specified, write the total number of bytes sent into *sbytes.
 */
int
sendfile(struct sendfile_args *uap)
{
	return (do_sendfile(uap, 0));
}

#ifdef COMPAT_43
/*
 * Old sendfile(2) entry point: identical arguments, but header/trailer
 * bytes are counted into *sbytes (compat == 1) rather than separately.
 */
int
osendfile(struct osendfile_args *uap)
{
	struct sendfile_args args;

	args.fd = uap->fd;
	args.s = uap->s;
	args.offset = uap->offset;
	args.nbytes = uap->nbytes;
	args.hdtr = uap->hdtr;
	args.sbytes = uap->sbytes;
	args.flags = uap->flags;

	return (do_sendfile(&args, 1));
}
#endif

/*
 * Common sendfile implementation.  'compat' != 0 selects the old 4.3BSD
 * accounting where header/trailer bytes are included in *sbytes.
 */
int
do_sendfile(struct sendfile_args *uap, int compat)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct filedesc *fdp;
	struct vnode *vp;
	struct vm_object *obj;
	struct socket *so;
	struct mbuf *m;
	struct sf_buf *sf;
	struct vm_page *pg;
	struct writev_args nuap;
	struct sf_hdtr hdtr;
	off_t off, xfsize, hdtr_size, sbytes = 0;
	int error = 0, s;

	KKASSERT(p);
	fdp = p->p_fd;

	vp = NULL;
	hdtr_size = 0;
	/*
	 * Do argument checking. Must be a regular file in, stream
	 * type and connected socket out, positive offset.
	 */
	fp = holdfp(fdp, uap->fd, FREAD);
	if (fp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_type != DTYPE_VNODE) {
		error = EINVAL;
		goto done;
	}
	/* Hold our own vnode reference so we can drop the file below. */
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Done with the file side; 'fp' is reused for the socket from here.
	 * holdsock() NULLs fp on failure, so the 'done' cleanup stays safe.
	 */
	fdrop(fp, td);
	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		goto done;
	so = (struct socket *)fp->f_data;
	if (so->so_type != SOCK_STREAM) {
		error = EINVAL;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto done;
	}
	if (uap->offset < 0) {
		error = EINVAL;
		goto done;
	}

	/*
	 * If specified, get the pointer to the sf_hdtr struct for
	 * any headers/trailers.
	 */
	if (uap->hdtr != NULL) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error)
			goto done;
		/*
		 * Send any headers. Wimp out and use writev(2).
		 */
		if (hdtr.headers != NULL) {
			nuap.fd = uap->s;
			nuap.iovp = hdtr.headers;
			nuap.iovcnt = hdtr.hdr_cnt;
			error = writev(&nuap);
			if (error)
				goto done;
			/* compat mode folds header bytes into *sbytes. */
			if (compat)
				sbytes += nuap.sysmsg_result;
			else
				hdtr_size += nuap.sysmsg_result;
		}
	}

	/*
	 * Protect against multiple writers to the socket.
	 */
	(void) sblock(&so->so_snd, M_WAITOK);

	/*
	 * Loop through the pages in the file, starting with the requested
	 * offset. Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 */
	for (off = uap->offset; ; off += xfsize, sbytes += xfsize) {
		vm_pindex_t pindex;
		vm_offset_t pgoff;

		pindex = OFF_TO_IDX(off);
retry_lookup:
		/*
		 * Calculate the amount to transfer. Not to exceed a page,
		 * the EOF, or the passed in nbytes.
		 */
		xfsize = obj->un_pager.vnp.vnp_size - off;
		if (xfsize > PAGE_SIZE)
			xfsize = PAGE_SIZE;
		pgoff = (vm_offset_t)(off & PAGE_MASK);
		if (PAGE_SIZE - pgoff < xfsize)
			xfsize = PAGE_SIZE - pgoff;
		if (uap->nbytes && xfsize > (uap->nbytes - sbytes))
			xfsize = uap->nbytes - sbytes;
		/* xfsize <= 0 means EOF or nbytes satisfied: normal exit. */
		if (xfsize <= 0)
			break;
		/*
		 * Optimize the non-blocking case by looking at the socket space
		 * before going to the extra work of constituting the sf_buf.
		 */
		if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) {
			if (so->so_state & SS_CANTSENDMORE)
				error = EPIPE;
			else
				error = EAGAIN;
			sbunlock(&so->so_snd);
			goto done;
		}
		/*
		 * Attempt to look up the page.
		 *
		 *	Allocate if not found
		 *
		 *	Wait and loop if busy.
		 */
		pg = vm_page_lookup(obj, pindex);

		if (pg == NULL) {
			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
			if (pg == NULL) {
				VM_WAIT;
				goto retry_lookup;
			}
			vm_page_wakeup(pg);
		} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
			/* Page was busy; it may have changed, recompute. */
			goto retry_lookup;
		}

		/*
		 * Wire the page so it does not get ripped out from under
		 * us.
		 */

		vm_page_wire(pg);

		/*
		 * If page is not valid for what we need, initiate I/O
		 */

		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
			struct uio auio;
			struct iovec aiov;
			int bsize;

			/*
			 * Ensure that our page is still around when the I/O
			 * completes.
			 */
			vm_page_io_start(pg);

			/*
			 * Get the page from backing store.  UIO_NOCOPY reads
			 * straight into the VM object's pages; iov_base is
			 * unused for this segment type.
			 */
			bsize = vp->v_mount->mnt_stat.f_iosize;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			aiov.iov_base = 0;
			aiov.iov_len = MAXBSIZE;
			auio.uio_resid = MAXBSIZE;
			auio.uio_offset = trunc_page(off);
			auio.uio_segflg = UIO_NOCOPY;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
			error = VOP_READ(vp, &auio,
			    IO_VMIO | ((MAXBSIZE / bsize) << 16),
			    p->p_ucred);
			VOP_UNLOCK(vp, 0, td);
			vm_page_flag_clear(pg, PG_ZERO);
			vm_page_io_finish(pg);
			if (error) {
				vm_page_unwire(pg, 0);
				/*
				 * See if anyone else might know about this page.
				 * If not and it is not valid, then free it.
				 */
				if (pg->wire_count == 0 && pg->valid == 0 &&
				    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
				    pg->hold_count == 0) {
					vm_page_busy(pg);
					vm_page_free(pg);
				}
				sbunlock(&so->so_snd);
				goto done;
			}
		}


		/*
		 * Get a sendfile buf. We usually wait as long as necessary,
		 * but this wait can be interrupted.
		 */
		if ((sf = sf_buf_alloc()) == NULL) {
			/* Interrupted by a signal: undo the wire and bail. */
			s = splvm();
			vm_page_unwire(pg, 0);
			if (pg->wire_count == 0 && pg->object == NULL)
				vm_page_free(pg);
			splx(s);
			sbunlock(&so->so_snd);
			error = EINTR;
			goto done;
		}


		/*
		 * Allocate a kernel virtual page and insert the physical page
		 * into it.
		 */

		sf->m = pg;
		pmap_qenter(sf->kva, &pg, 1);
		/*
		 * Get an mbuf header and set it up as having external storage.
		 * The sf_buf's reference is released via ext_free when the
		 * mbuf is freed by the protocol.
		 */
		MGETHDR(m, M_WAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sf_buf_free((void *)sf->kva, PAGE_SIZE);
			sbunlock(&so->so_snd);
			goto done;
		}
		m->m_ext.ext_free = sf_buf_free;
		m->m_ext.ext_ref = sf_buf_ref;
		m->m_ext.ext_buf = (void *)sf->kva;
		m->m_ext.ext_size = PAGE_SIZE;
		m->m_data = (char *) sf->kva + pgoff;
		m->m_flags |= M_EXT;
		m->m_pkthdr.len = m->m_len = xfsize;
		/*
		 * Add the buffer to the socket buffer chain.
		 */
		s = splnet();
retry_space:
		/*
		 * Make sure that the socket is still able to take more data.
		 * CANTSENDMORE being true usually means that the connection
		 * was closed. so_error is true when an error was sensed after
		 * a previous send.
		 * The state is checked after the page mapping and buffer
		 * allocation above since those operations may block and make
		 * any socket checks stale. From this point forward, nothing
		 * blocks before the pru_send (or more accurately, any blocking
		 * results in a loop back to here to re-check).
		 */
		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
			} else {
				error = so->so_error;
				so->so_error = 0;
			}
			m_freem(m);
			sbunlock(&so->so_snd);
			splx(s);
			goto done;
		}
		/*
		 * Wait for socket space to become available. We do this just
		 * after checking the connection state above in order to avoid
		 * a race condition with sbwait().
		 */
		if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) {
			if (so->so_state & SS_NBIO) {
				m_freem(m);
				sbunlock(&so->so_snd);
				splx(s);
				error = EAGAIN;
				goto done;
			}
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error) {
				m_freem(m);
				sbunlock(&so->so_snd);
				splx(s);
				goto done;
			}
			goto retry_space;
		}
		/* Hand the mbuf to the protocol; it now owns m. */
		error =
		    (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, td);
		splx(s);
		if (error) {
			sbunlock(&so->so_snd);
			goto done;
		}
	}
	sbunlock(&so->so_snd);

	/*
	 * Send trailers. Wimp out and use writev(2).
	 */
	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
		nuap.fd = uap->s;
		nuap.iovp = hdtr.trailers;
		nuap.iovcnt = hdtr.trl_cnt;
		error = writev(&nuap);
		if (error)
			goto done;
		if (compat)
			sbytes += nuap.sysmsg_result;
		else
			hdtr_size += nuap.sysmsg_result;
	}

done:
	/* Report bytes sent even on error paths (partial transfers count). */
	if (uap->sbytes != NULL) {
		if (compat == 0)
			sbytes += hdtr_size;
		copyout(&sbytes, uap->sbytes, sizeof(off_t));
	}
	if (vp)
		vrele(vp);
	if (fp)
		fdrop(fp, td);
	return (error);
}