1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2015, Joyent, Inc. All rights reserved. 25 * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. 26 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 27 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association. 28 * Copyright 2022 Garrett D'Amore 29 */ 30 31 #include <sys/types.h> 32 #include <sys/t_lock.h> 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/buf.h> 36 #include <sys/conf.h> 37 #include <sys/cred.h> 38 #include <sys/kmem.h> 39 #include <sys/sysmacros.h> 40 #include <sys/vfs.h> 41 #include <sys/vnode.h> 42 #include <sys/debug.h> 43 #include <sys/errno.h> 44 #include <sys/time.h> 45 #include <sys/file.h> 46 #include <sys/user.h> 47 #include <sys/stream.h> 48 #include <sys/strsubr.h> 49 #include <sys/strsun.h> 50 #include <sys/sunddi.h> 51 #include <sys/esunddi.h> 52 #include <sys/flock.h> 53 #include <sys/modctl.h> 54 #include <sys/cmn_err.h> 55 #include <sys/vmsystm.h> 56 #include <sys/policy.h> 57 #include <sys/limits.h> 58 59 #include <sys/socket.h> 60 #include <sys/socketvar.h> 61 62 #include <sys/isa_defs.h> 63 #include <sys/inttypes.h> 64 #include <sys/systm.h> 65 #include <sys/cpuvar.h> 66 #include <sys/filio.h> 67 #include <sys/sendfile.h> 68 #include <sys/ddi.h> 69 #include <vm/seg.h> 70 #include <vm/seg_map.h> 71 #include <vm/seg_kpm.h> 72 73 #include <fs/sockfs/sockcommon.h> 74 #include <fs/sockfs/sockfilter_impl.h> 75 #include <fs/sockfs/socktpi.h> 76 77 #ifdef SOCK_TEST 78 int do_useracc = 1; /* Controlled by setting SO_DEBUG to 4 */ 79 #else 80 #define do_useracc 1 81 #endif /* SOCK_TEST */ 82 83 extern int xnet_truncate_print; 84 85 /* 86 * Kernel component of socket creation. 87 * 88 * The socket library determines which version number to use. 89 * First the library calls this with a NULL devpath. If this fails 90 * to find a transport (using solookup) the library will look in /etc/netconfig 91 * for the appropriate transport. If one is found it will pass in the 92 * devpath for the kernel to use. 93 */ 94 int 95 so_socket(int family, int type_w_flags, int protocol, char *devpath, 96 int version) 97 { 98 struct sonode *so; 99 vnode_t *vp; 100 struct file *fp; 101 int fd; 102 int error; 103 int type; 104 105 type = type_w_flags & SOCK_TYPE_MASK; 106 type_w_flags &= ~SOCK_TYPE_MASK; 107 if (type_w_flags & ~(SOCK_CLOEXEC|SOCK_NDELAY|SOCK_NONBLOCK)) 108 return (set_errno(EINVAL)); 109 110 if (devpath != NULL) { 111 char *buf; 112 size_t kdevpathlen = 0; 113 114 buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 115 if ((error = copyinstr(devpath, buf, 116 MAXPATHLEN, &kdevpathlen)) != 0) { 117 kmem_free(buf, MAXPATHLEN); 118 return (set_errno(error)); 119 } 120 so = socket_create(family, type, protocol, buf, NULL, 121 SOCKET_SLEEP, version, CRED(), &error); 122 kmem_free(buf, MAXPATHLEN); 123 } else { 124 so = socket_create(family, type, protocol, NULL, NULL, 125 SOCKET_SLEEP, version, CRED(), &error); 126 } 127 if (so == NULL) 128 return (set_errno(error)); 129 130 /* Allocate a file descriptor for the socket */ 131 vp = SOTOV(so); 132 error = falloc(vp, FWRITE|FREAD, &fp, &fd); 133 if (error != 0) { 134 (void) socket_close(so, 0, CRED()); 135 socket_destroy(so); 136 return (set_errno(error)); 137 } 138 139 /* 140 * Now fill in the entries that falloc reserved 141 */ 142 if (type_w_flags & SOCK_NDELAY) { 143 so->so_state |= SS_NDELAY; 144 fp->f_flag |= FNDELAY; 145 } 146 if (type_w_flags & SOCK_NONBLOCK) { 147 so->so_state |= SS_NONBLOCK; 148 fp->f_flag |= FNONBLOCK; 149 } 150 mutex_exit(&fp->f_tlock); 151 setf(fd, fp); 152 if ((type_w_flags & SOCK_CLOEXEC) != 0) { 153 f_setfd(fd, FD_CLOEXEC); 154 } 155 156 return (fd); 157 } 158 159 /* 160 * Map from a file descriptor to a socket node. 161 * Returns with the file descriptor held i.e. the caller has to 162 * use releasef when done with the file descriptor. 163 */ 164 struct sonode * 165 getsonode(int sock, int *errorp, file_t **fpp) 166 { 167 file_t *fp; 168 vnode_t *vp; 169 struct sonode *so; 170 171 if ((fp = getf(sock)) == NULL) { 172 *errorp = EBADF; 173 eprintline(*errorp); 174 return (NULL); 175 } 176 vp = fp->f_vnode; 177 /* Check if it is a socket */ 178 if (vp->v_type != VSOCK) { 179 releasef(sock); 180 *errorp = ENOTSOCK; 181 eprintline(*errorp); 182 return (NULL); 183 } 184 /* 185 * Use the stream head to find the real socket vnode. 186 * This is needed when namefs sits above sockfs. 187 */ 188 if (vp->v_stream) { 189 ASSERT(vp->v_stream->sd_vnode); 190 vp = vp->v_stream->sd_vnode; 191 192 so = VTOSO(vp); 193 if (so->so_version == SOV_STREAM) { 194 releasef(sock); 195 *errorp = ENOTSOCK; 196 eprintsoline(so, *errorp); 197 return (NULL); 198 } 199 } else { 200 so = VTOSO(vp); 201 } 202 if (fpp) 203 *fpp = fp; 204 return (so); 205 } 206 207 /* 208 * Allocate and copyin a sockaddr. 209 * Ensures NULL termination for AF_UNIX addresses by extending them 210 * with one NULL byte if need be. Verifies that the length is not 211 * excessive to prevent an application from consuming all of kernel 212 * memory. Returns NULL when an error occurred. 213 */ 214 static struct sockaddr * 215 copyin_name(struct sonode *so, struct sockaddr *name, socklen_t *namelenp, 216 int *errorp) 217 { 218 char *faddr; 219 size_t namelen = (size_t)*namelenp; 220 221 ASSERT(namelen != 0); 222 if (namelen > SO_MAXARGSIZE) { 223 *errorp = EINVAL; 224 eprintsoline(so, *errorp); 225 return (NULL); 226 } 227 228 faddr = (char *)kmem_alloc(namelen, KM_SLEEP); 229 if (copyin(name, faddr, namelen)) { 230 kmem_free(faddr, namelen); 231 *errorp = EFAULT; 232 eprintsoline(so, *errorp); 233 return (NULL); 234 } 235 236 /* 237 * Add space for NULL termination if needed. 238 * Do a quick check if the last byte is NUL. 239 */ 240 if (so->so_family == AF_UNIX && faddr[namelen - 1] != '\0') { 241 /* Check if there is any NULL termination */ 242 size_t i; 243 int foundnull = 0; 244 245 for (i = sizeof (name->sa_family); i < namelen; i++) { 246 if (faddr[i] == '\0') { 247 foundnull = 1; 248 break; 249 } 250 } 251 if (!foundnull) { 252 /* Add extra byte for NUL padding */ 253 char *nfaddr; 254 255 nfaddr = (char *)kmem_alloc(namelen + 1, KM_SLEEP); 256 bcopy(faddr, nfaddr, namelen); 257 kmem_free(faddr, namelen); 258 259 /* NUL terminate */ 260 nfaddr[namelen] = '\0'; 261 namelen++; 262 ASSERT((socklen_t)namelen == namelen); 263 *namelenp = (socklen_t)namelen; 264 faddr = nfaddr; 265 } 266 } 267 return ((struct sockaddr *)faddr); 268 } 269 270 /* 271 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL. 272 */ 273 static int 274 copyout_arg(void *uaddr, socklen_t ulen, void *ulenp, void *kaddr, 275 socklen_t klen) 276 { 277 if (uaddr != NULL) { 278 if (ulen > klen) 279 ulen = klen; 280 281 if (ulen != 0) { 282 if (copyout(kaddr, uaddr, ulen)) 283 return (EFAULT); 284 } 285 } else 286 ulen = 0; 287 288 if (ulenp != NULL) { 289 if (copyout(&ulen, ulenp, sizeof (ulen))) 290 return (EFAULT); 291 } 292 return (0); 293 } 294 295 /* 296 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL. 297 * If klen is greater than ulen it still uses the non-truncated 298 * klen to update ulenp. 299 */ 300 static int 301 copyout_name(void *uaddr, socklen_t ulen, void *ulenp, void *kaddr, 302 socklen_t klen) 303 { 304 if (uaddr != NULL) { 305 if (ulen >= klen) 306 ulen = klen; 307 else if (ulen != 0 && xnet_truncate_print) { 308 printf("sockfs: truncating copyout of address using " 309 "XNET semantics for pid = %d. Lengths %d, %d\n", 310 curproc->p_pid, klen, ulen); 311 } 312 313 if (ulen != 0) { 314 if (copyout(kaddr, uaddr, ulen)) 315 return (EFAULT); 316 } else 317 klen = 0; 318 } else 319 klen = 0; 320 321 if (ulenp != NULL) { 322 if (copyout(&klen, ulenp, sizeof (klen))) 323 return (EFAULT); 324 } 325 return (0); 326 } 327 328 /* 329 * The socketpair() code in libsocket creates two sockets (using 330 * the /etc/netconfig fallback if needed) before calling this routine 331 * to connect the two sockets together. 332 * 333 * For a SOCK_STREAM socketpair a listener is needed - in that case this 334 * routine will create a new file descriptor as part of accepting the 335 * connection. The library socketpair() will check if svs[2] has changed 336 * in which case it will close the changed fd. 337 * 338 * Note that this code could use the TPI feature of accepting the connection 339 * on the listening endpoint. However, that would require significant changes 340 * to soaccept. 341 */ 342 int 343 so_socketpair(int sv[2]) 344 { 345 int svs[2]; 346 struct sonode *so1, *so2; 347 int error; 348 int orig_flags; 349 struct sockaddr_ux *name; 350 size_t namelen; 351 sotpi_info_t *sti1; 352 sotpi_info_t *sti2; 353 354 dprint(1, ("so_socketpair(%p)\n", (void *)sv)); 355 356 error = useracc(sv, sizeof (svs), B_WRITE); 357 if (error && do_useracc) 358 return (set_errno(EFAULT)); 359 360 if (copyin(sv, svs, sizeof (svs))) 361 return (set_errno(EFAULT)); 362 363 if ((so1 = getsonode(svs[0], &error, NULL)) == NULL) 364 return (set_errno(error)); 365 366 if ((so2 = getsonode(svs[1], &error, NULL)) == NULL) { 367 releasef(svs[0]); 368 return (set_errno(error)); 369 } 370 371 if (so1->so_family != AF_UNIX || so2->so_family != AF_UNIX) { 372 error = EOPNOTSUPP; 373 goto done; 374 } 375 376 sti1 = SOTOTPI(so1); 377 sti2 = SOTOTPI(so2); 378 379 /* 380 * The code below makes assumptions about the "sockfs" implementation. 381 * So make sure that the correct implementation is really used. 382 */ 383 ASSERT(so1->so_ops == &sotpi_sonodeops); 384 ASSERT(so2->so_ops == &sotpi_sonodeops); 385 386 if (so1->so_type == SOCK_DGRAM) { 387 /* 388 * Bind both sockets and connect them with each other. 389 * Need to allocate name/namelen for soconnect. 390 */ 391 error = socket_bind(so1, NULL, 0, _SOBIND_UNSPEC, CRED()); 392 if (error) { 393 eprintsoline(so1, error); 394 goto done; 395 } 396 error = socket_bind(so2, NULL, 0, _SOBIND_UNSPEC, CRED()); 397 if (error) { 398 eprintsoline(so2, error); 399 goto done; 400 } 401 namelen = sizeof (struct sockaddr_ux); 402 name = kmem_alloc(namelen, KM_SLEEP); 403 name->sou_family = AF_UNIX; 404 name->sou_addr = sti2->sti_ux_laddr; 405 error = socket_connect(so1, 406 (struct sockaddr *)name, 407 (socklen_t)namelen, 408 0, _SOCONNECT_NOXLATE, CRED()); 409 if (error) { 410 kmem_free(name, namelen); 411 eprintsoline(so1, error); 412 goto done; 413 } 414 name->sou_addr = sti1->sti_ux_laddr; 415 error = socket_connect(so2, 416 (struct sockaddr *)name, 417 (socklen_t)namelen, 418 0, _SOCONNECT_NOXLATE, CRED()); 419 kmem_free(name, namelen); 420 if (error) { 421 eprintsoline(so2, error); 422 goto done; 423 } 424 releasef(svs[0]); 425 releasef(svs[1]); 426 } else { 427 /* 428 * Bind both sockets, with so1 being a listener. 429 * Connect so2 to so1 - nonblocking to avoid waiting for 430 * soaccept to complete. 431 * Accept a connection on so1. Pass out the new fd as sv[0]. 432 * The library will detect the changed fd and close 433 * the original one. 434 */ 435 struct sonode *nso; 436 struct vnode *nvp; 437 struct file *nfp; 438 int nfd; 439 440 /* 441 * We could simply call socket_listen() here (which would do the 442 * binding automatically) if the code didn't rely on passing 443 * _SOBIND_NOXLATE to the TPI implementation of socket_bind(). 444 */ 445 error = socket_bind(so1, NULL, 0, _SOBIND_UNSPEC| 446 _SOBIND_NOXLATE|_SOBIND_LISTEN|_SOBIND_SOCKETPAIR, 447 CRED()); 448 if (error) { 449 eprintsoline(so1, error); 450 goto done; 451 } 452 error = socket_bind(so2, NULL, 0, _SOBIND_UNSPEC, CRED()); 453 if (error) { 454 eprintsoline(so2, error); 455 goto done; 456 } 457 458 namelen = sizeof (struct sockaddr_ux); 459 name = kmem_alloc(namelen, KM_SLEEP); 460 name->sou_family = AF_UNIX; 461 name->sou_addr = sti1->sti_ux_laddr; 462 error = socket_connect(so2, 463 (struct sockaddr *)name, 464 (socklen_t)namelen, 465 FNONBLOCK, _SOCONNECT_NOXLATE, CRED()); 466 kmem_free(name, namelen); 467 if (error) { 468 if (error != EINPROGRESS) { 469 eprintsoline(so2, error); goto done; 470 } 471 } 472 473 error = socket_accept(so1, 0, CRED(), &nso); 474 if (error) { 475 eprintsoline(so1, error); 476 goto done; 477 } 478 479 /* wait for so2 being SS_CONNECTED ignoring signals */ 480 mutex_enter(&so2->so_lock); 481 error = sowaitconnected(so2, 0, 1); 482 mutex_exit(&so2->so_lock); 483 if (error != 0) { 484 (void) socket_close(nso, 0, CRED()); 485 socket_destroy(nso); 486 eprintsoline(so2, error); 487 goto done; 488 } 489 490 nvp = SOTOV(nso); 491 error = falloc(nvp, FWRITE|FREAD, &nfp, &nfd); 492 if (error != 0) { 493 (void) socket_close(nso, 0, CRED()); 494 socket_destroy(nso); 495 eprintsoline(nso, error); 496 goto done; 497 } 498 /* 499 * copy over FNONBLOCK and FNDELAY flags should they exist 500 */ 501 if (so1->so_state & SS_NONBLOCK) 502 nfp->f_flag |= FNONBLOCK; 503 if (so1->so_state & SS_NDELAY) 504 nfp->f_flag |= FNDELAY; 505 506 /* 507 * fill in the entries that falloc reserved 508 */ 509 mutex_exit(&nfp->f_tlock); 510 setf(nfd, nfp); 511 512 /* 513 * get the original flags before we release 514 */ 515 VERIFY(f_getfd_error(svs[0], &orig_flags) == 0); 516 517 releasef(svs[0]); 518 releasef(svs[1]); 519 520 /* 521 * If FD_CLOEXEC was set on the filedescriptor we're 522 * swapping out, we should set it on the new one too. 523 */ 524 if (orig_flags & FD_CLOEXEC) { 525 f_setfd(nfd, FD_CLOEXEC); 526 } 527 528 /* 529 * The socketpair library routine will close the original 530 * svs[0] when this code passes out a different file 531 * descriptor. 532 */ 533 svs[0] = nfd; 534 535 if (copyout(svs, sv, sizeof (svs))) { 536 (void) closeandsetf(nfd, NULL); 537 eprintline(EFAULT); 538 return (set_errno(EFAULT)); 539 } 540 } 541 return (0); 542 543 done: 544 releasef(svs[0]); 545 releasef(svs[1]); 546 return (set_errno(error)); 547 } 548 549 int 550 bind(int sock, struct sockaddr *name, socklen_t namelen, int version) 551 { 552 struct sonode *so; 553 int error; 554 555 dprint(1, ("bind(%d, %p, %d)\n", 556 sock, (void *)name, namelen)); 557 558 if ((so = getsonode(sock, &error, NULL)) == NULL) 559 return (set_errno(error)); 560 561 /* Allocate and copyin name */ 562 /* 563 * X/Open test does not expect EFAULT with NULL name and non-zero 564 * namelen. 565 */ 566 if (name != NULL && namelen != 0) { 567 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 568 name = copyin_name(so, name, &namelen, &error); 569 if (name == NULL) { 570 releasef(sock); 571 return (set_errno(error)); 572 } 573 } else { 574 name = NULL; 575 namelen = 0; 576 } 577 578 switch (version) { 579 default: 580 error = socket_bind(so, name, namelen, 0, CRED()); 581 break; 582 case SOV_XPG4_2: 583 error = socket_bind(so, name, namelen, _SOBIND_XPG4_2, CRED()); 584 break; 585 case SOV_SOCKBSD: 586 error = socket_bind(so, name, namelen, _SOBIND_SOCKBSD, CRED()); 587 break; 588 } 589 done: 590 releasef(sock); 591 if (name != NULL) 592 kmem_free(name, (size_t)namelen); 593 594 if (error) 595 return (set_errno(error)); 596 return (0); 597 } 598 599 /* ARGSUSED2 */ 600 int 601 listen(int sock, int backlog, int version) 602 { 603 struct sonode *so; 604 int error; 605 606 dprint(1, ("listen(%d, %d)\n", 607 sock, backlog)); 608 609 if ((so = getsonode(sock, &error, NULL)) == NULL) 610 return (set_errno(error)); 611 612 error = socket_listen(so, backlog, CRED()); 613 614 releasef(sock); 615 if (error) 616 return (set_errno(error)); 617 return (0); 618 } 619 620 /*ARGSUSED3*/ 621 int 622 accept(int sock, struct sockaddr *name, socklen_t *namelenp, int version, 623 int flags) 624 { 625 struct sonode *so; 626 file_t *fp; 627 int error; 628 socklen_t namelen; 629 struct sonode *nso; 630 struct vnode *nvp; 631 struct file *nfp; 632 int nfd; 633 int ssflags; 634 struct sockaddr *addrp; 635 socklen_t addrlen; 636 637 dprint(1, ("accept(%d, %p, %p)\n", 638 sock, (void *)name, (void *)namelenp)); 639 640 if (flags & ~(SOCK_CLOEXEC|SOCK_NONBLOCK|SOCK_NDELAY)) { 641 return (set_errno(EINVAL)); 642 } 643 644 /* Translate SOCK_ flags to their SS_ variant */ 645 ssflags = 0; 646 if (flags & SOCK_NONBLOCK) 647 ssflags |= SS_NONBLOCK; 648 if (flags & SOCK_NDELAY) 649 ssflags |= SS_NDELAY; 650 651 if ((so = getsonode(sock, &error, &fp)) == NULL) 652 return (set_errno(error)); 653 654 if (name != NULL) { 655 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 656 if (copyin(namelenp, &namelen, sizeof (namelen))) { 657 releasef(sock); 658 return (set_errno(EFAULT)); 659 } 660 if (namelen != 0) { 661 error = useracc(name, (size_t)namelen, B_WRITE); 662 if (error && do_useracc) { 663 releasef(sock); 664 return (set_errno(EFAULT)); 665 } 666 } else 667 name = NULL; 668 } else { 669 namelen = 0; 670 } 671 672 /* 673 * Allocate the user fd before socket_accept() in order to 674 * catch EMFILE errors before calling socket_accept(). 675 */ 676 if ((nfd = ufalloc(0)) == -1) { 677 eprintsoline(so, EMFILE); 678 releasef(sock); 679 return (set_errno(EMFILE)); 680 } 681 error = socket_accept(so, fp->f_flag, CRED(), &nso); 682 if (error) { 683 setf(nfd, NULL); 684 releasef(sock); 685 return (set_errno(error)); 686 } 687 688 nvp = SOTOV(nso); 689 690 ASSERT(MUTEX_NOT_HELD(&nso->so_lock)); 691 if (namelen != 0) { 692 addrlen = so->so_max_addr_len; 693 addrp = (struct sockaddr *)kmem_alloc(addrlen, KM_SLEEP); 694 695 if ((error = socket_getpeername(nso, (struct sockaddr *)addrp, 696 &addrlen, B_TRUE, CRED())) == 0) { 697 error = copyout_name(name, namelen, namelenp, 698 addrp, addrlen); 699 } else { 700 ASSERT(error == EINVAL || error == ENOTCONN); 701 error = ECONNABORTED; 702 } 703 kmem_free(addrp, so->so_max_addr_len); 704 } 705 706 if (error) { 707 setf(nfd, NULL); 708 (void) socket_close(nso, 0, CRED()); 709 socket_destroy(nso); 710 releasef(sock); 711 return (set_errno(error)); 712 } 713 error = falloc(NULL, FWRITE|FREAD, &nfp, NULL); 714 if (error != 0) { 715 setf(nfd, NULL); 716 (void) socket_close(nso, 0, CRED()); 717 socket_destroy(nso); 718 eprintsoline(so, error); 719 releasef(sock); 720 return (set_errno(error)); 721 } 722 /* 723 * fill in the entries that falloc reserved 724 */ 725 nfp->f_vnode = nvp; 726 mutex_exit(&nfp->f_tlock); 727 setf(nfd, nfp); 728 729 /* 730 * Act on SOCK_CLOEXEC from flags 731 */ 732 if (flags & SOCK_CLOEXEC) { 733 f_setfd(nfd, FD_CLOEXEC); 734 } 735 736 /* 737 * Copy FNDELAY and FNONBLOCK from listener to acceptor 738 * and from ssflags 739 */ 740 if ((ssflags | so->so_state) & (SS_NDELAY|SS_NONBLOCK)) { 741 uint_t oflag = nfp->f_flag; 742 int arg = 0; 743 744 if ((ssflags | so->so_state) & SS_NONBLOCK) 745 arg |= FNONBLOCK; 746 else if ((ssflags | so->so_state) & SS_NDELAY) 747 arg |= FNDELAY; 748 749 /* 750 * This code is a simplification of the F_SETFL code in fcntl() 751 * Ignore any errors from VOP_SETFL. 752 */ 753 if ((error = VOP_SETFL(nvp, oflag, arg, nfp->f_cred, NULL)) 754 != 0) { 755 eprintsoline(so, error); 756 error = 0; 757 } else { 758 mutex_enter(&nfp->f_tlock); 759 nfp->f_flag &= ~FMASK | (FREAD|FWRITE); 760 nfp->f_flag |= arg; 761 mutex_exit(&nfp->f_tlock); 762 } 763 } 764 releasef(sock); 765 return (nfd); 766 } 767 768 int 769 connect(int sock, struct sockaddr *name, socklen_t namelen, int version) 770 { 771 struct sonode *so; 772 file_t *fp; 773 int error; 774 775 dprint(1, ("connect(%d, %p, %d)\n", 776 sock, (void *)name, namelen)); 777 778 if ((so = getsonode(sock, &error, &fp)) == NULL) 779 return (set_errno(error)); 780 781 /* Allocate and copyin name */ 782 if (namelen != 0) { 783 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 784 name = copyin_name(so, name, &namelen, &error); 785 if (name == NULL) { 786 releasef(sock); 787 return (set_errno(error)); 788 } 789 } else 790 name = NULL; 791 792 error = socket_connect(so, name, namelen, fp->f_flag, 793 (version != SOV_XPG4_2) ? 0 : _SOCONNECT_XPG4_2, CRED()); 794 releasef(sock); 795 if (name) 796 kmem_free(name, (size_t)namelen); 797 if (error) 798 return (set_errno(error)); 799 return (0); 800 } 801 802 /*ARGSUSED2*/ 803 int 804 shutdown(int sock, int how, int version) 805 { 806 struct sonode *so; 807 int error; 808 809 dprint(1, ("shutdown(%d, %d)\n", 810 sock, how)); 811 812 if ((so = getsonode(sock, &error, NULL)) == NULL) 813 return (set_errno(error)); 814 815 error = socket_shutdown(so, how, CRED()); 816 817 releasef(sock); 818 if (error) 819 return (set_errno(error)); 820 return (0); 821 } 822 823 /* 824 * Common receive routine. 825 */ 826 static ssize_t 827 recvit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags, 828 socklen_t *namelenp, socklen_t *controllenp, int *flagsp) 829 { 830 struct sonode *so; 831 file_t *fp; 832 void *name; 833 socklen_t namelen; 834 void *control; 835 socklen_t controllen, free_controllen; 836 ssize_t len; 837 int error; 838 839 if ((so = getsonode(sock, &error, &fp)) == NULL) 840 return (set_errno(error)); 841 842 len = uiop->uio_resid; 843 uiop->uio_fmode = fp->f_flag; 844 uiop->uio_extflg = UIO_COPY_CACHED; 845 846 name = msg->msg_name; 847 namelen = msg->msg_namelen; 848 control = msg->msg_control; 849 controllen = msg->msg_controllen; 850 851 msg->msg_flags = flags & (MSG_OOB | MSG_PEEK | MSG_WAITALL | 852 MSG_DONTWAIT | MSG_XPG4_2); 853 854 error = socket_recvmsg(so, msg, uiop, CRED()); 855 if (error) { 856 releasef(sock); 857 return (set_errno(error)); 858 } 859 lwp_stat_update(LWP_STAT_MSGRCV, 1); 860 releasef(sock); 861 862 free_controllen = msg->msg_controllen; 863 864 error = copyout_name(name, namelen, namelenp, 865 msg->msg_name, msg->msg_namelen); 866 if (error) 867 goto err; 868 869 if (flagsp != NULL) { 870 /* 871 * Clear internal flag. 872 */ 873 msg->msg_flags &= ~MSG_XPG4_2; 874 875 /* 876 * Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only 877 * when controllen is zero and there is control data to 878 * copy out. 879 */ 880 if (controllen != 0 && 881 (msg->msg_controllen > controllen || control == NULL)) { 882 dprint(1, ("recvit: CTRUNC %d %d %p\n", 883 msg->msg_controllen, controllen, control)); 884 885 msg->msg_flags |= MSG_CTRUNC; 886 } 887 if (copyout(&msg->msg_flags, flagsp, 888 sizeof (msg->msg_flags))) { 889 error = EFAULT; 890 goto err; 891 } 892 } 893 894 if (controllen != 0) { 895 if (!(flags & MSG_XPG4_2)) { 896 /* 897 * Good old msg_accrights can only return a multiple 898 * of 4 bytes. 899 */ 900 controllen &= ~((int)sizeof (uint32_t) - 1); 901 } 902 903 if (msg->msg_controllen > controllen || control == NULL) { 904 /* 905 * If the truncated part contains file descriptors, 906 * then they must be closed in the kernel as they 907 * will not be included in the data returned to 908 * user space. Close them now so that the header size 909 * can be safely adjusted prior to copyout. In case of 910 * an error during copyout, the remaining file 911 * descriptors will be closed in the error handler 912 * below. 913 */ 914 so_closefds(msg->msg_control, msg->msg_controllen, 915 !(flags & MSG_XPG4_2), 916 control == NULL ? 0 : controllen); 917 918 /* 919 * In the case of a truncated control message, the last 920 * cmsg header that fits into the available buffer 921 * space must be adjusted to reflect the actual amount 922 * of associated data that will be returned. This only 923 * needs to be done for XPG4 messages as non-XPG4 924 * messages are not structured (they are just a 925 * buffer and a length - msg_accrights(len)). 926 */ 927 if (control != NULL && (flags & MSG_XPG4_2)) { 928 so_truncatecmsg(msg->msg_control, 929 msg->msg_controllen, controllen); 930 msg->msg_controllen = controllen; 931 } 932 } 933 934 error = copyout_arg(control, controllen, controllenp, 935 msg->msg_control, msg->msg_controllen); 936 937 if (error) 938 goto err; 939 940 } 941 if (msg->msg_namelen != 0) 942 kmem_free(msg->msg_name, (size_t)msg->msg_namelen); 943 if (free_controllen != 0) 944 kmem_free(msg->msg_control, (size_t)free_controllen); 945 return (len - uiop->uio_resid); 946 947 err: 948 /* 949 * If we fail and the control part contains file descriptors 950 * we have to close them. For a truncated control message, the 951 * descriptors which were cut off have already been closed and the 952 * length adjusted so that they will not be closed again. 953 */ 954 if (msg->msg_controllen != 0) 955 so_closefds(msg->msg_control, msg->msg_controllen, 956 !(flags & MSG_XPG4_2), 0); 957 if (msg->msg_namelen != 0) 958 kmem_free(msg->msg_name, (size_t)msg->msg_namelen); 959 if (free_controllen != 0) 960 kmem_free(msg->msg_control, (size_t)free_controllen); 961 return (set_errno(error)); 962 } 963 964 /* 965 * Native system call 966 */ 967 ssize_t 968 recv(int sock, void *buffer, size_t len, int flags) 969 { 970 struct nmsghdr lmsg; 971 struct uio auio; 972 struct iovec aiov[1]; 973 974 dprint(1, ("recv(%d, %p, %ld, %d)\n", 975 sock, buffer, len, flags)); 976 977 if ((ssize_t)len < 0) { 978 return (set_errno(EINVAL)); 979 } 980 981 aiov[0].iov_base = buffer; 982 aiov[0].iov_len = len; 983 auio.uio_loffset = 0; 984 auio.uio_iov = aiov; 985 auio.uio_iovcnt = 1; 986 auio.uio_resid = len; 987 auio.uio_segflg = UIO_USERSPACE; 988 auio.uio_limit = 0; 989 990 lmsg.msg_namelen = 0; 991 lmsg.msg_controllen = 0; 992 lmsg.msg_flags = 0; 993 return (recvit(sock, &lmsg, &auio, flags, NULL, NULL, NULL)); 994 } 995 996 ssize_t 997 recvfrom(int sock, void *buffer, size_t len, int flags, struct sockaddr *name, 998 socklen_t *namelenp) 999 { 1000 struct nmsghdr lmsg; 1001 struct uio auio; 1002 struct iovec aiov[1]; 1003 1004 dprint(1, ("recvfrom(%d, %p, %ld, %d, %p, %p)\n", 1005 sock, buffer, len, flags, (void *)name, (void *)namelenp)); 1006 1007 if ((ssize_t)len < 0) { 1008 return (set_errno(EINVAL)); 1009 } 1010 1011 aiov[0].iov_base = buffer; 1012 aiov[0].iov_len = len; 1013 auio.uio_loffset = 0; 1014 auio.uio_iov = aiov; 1015 auio.uio_iovcnt = 1; 1016 auio.uio_resid = len; 1017 auio.uio_segflg = UIO_USERSPACE; 1018 auio.uio_limit = 0; 1019 1020 lmsg.msg_name = (char *)name; 1021 if (namelenp != NULL) { 1022 if (copyin(namelenp, &lmsg.msg_namelen, 1023 sizeof (lmsg.msg_namelen))) 1024 return (set_errno(EFAULT)); 1025 } else { 1026 lmsg.msg_namelen = 0; 1027 } 1028 lmsg.msg_controllen = 0; 1029 lmsg.msg_flags = 0; 1030 1031 return (recvit(sock, &lmsg, &auio, flags, namelenp, NULL, NULL)); 1032 } 1033 1034 /* 1035 * Uses the MSG_XPG4_2 flag to determine if the caller is using 1036 * struct omsghdr or struct nmsghdr. 1037 */ 1038 ssize_t 1039 recvmsg(int sock, struct nmsghdr *msg, int flags) 1040 { 1041 STRUCT_DECL(nmsghdr, u_lmsg); 1042 STRUCT_HANDLE(nmsghdr, umsgptr); 1043 struct nmsghdr lmsg; 1044 struct uio auio; 1045 struct iovec buf[IOV_MAX_STACK], *aiov = buf; 1046 ssize_t iovsize = 0; 1047 int iovcnt; 1048 ssize_t len, rval; 1049 int i; 1050 int *flagsp; 1051 model_t model; 1052 1053 dprint(1, ("recvmsg(%d, %p, %d)\n", 1054 sock, (void *)msg, flags)); 1055 1056 model = get_udatamodel(); 1057 STRUCT_INIT(u_lmsg, model); 1058 STRUCT_SET_HANDLE(umsgptr, model, msg); 1059 1060 if (flags & MSG_XPG4_2) { 1061 if (copyin(msg, STRUCT_BUF(u_lmsg), STRUCT_SIZE(u_lmsg))) 1062 return (set_errno(EFAULT)); 1063 flagsp = STRUCT_FADDR(umsgptr, msg_flags); 1064 } else { 1065 /* 1066 * Assumes that nmsghdr and omsghdr are identically shaped 1067 * except for the added msg_flags field. 1068 */ 1069 if (copyin(msg, STRUCT_BUF(u_lmsg), 1070 SIZEOF_STRUCT(omsghdr, model))) 1071 return (set_errno(EFAULT)); 1072 STRUCT_FSET(u_lmsg, msg_flags, 0); 1073 flagsp = NULL; 1074 } 1075 1076 /* 1077 * Code below us will kmem_alloc memory and hang it 1078 * off msg_control and msg_name fields. This forces 1079 * us to copy the structure to its native form. 1080 */ 1081 lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name); 1082 lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen); 1083 lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov); 1084 lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen); 1085 lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control); 1086 lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen); 1087 lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags); 1088 1089 iovcnt = lmsg.msg_iovlen; 1090 1091 if (iovcnt <= 0 || iovcnt > IOV_MAX) { 1092 return (set_errno(EMSGSIZE)); 1093 } 1094 1095 if (iovcnt > IOV_MAX_STACK) { 1096 iovsize = iovcnt * sizeof (struct iovec); 1097 aiov = kmem_alloc(iovsize, KM_SLEEP); 1098 } 1099 1100 #ifdef _SYSCALL32_IMPL 1101 /* 1102 * 32-bit callers need to have their iovec expanded, while ensuring 1103 * that they can't move more than 2Gbytes of data in a single call. 1104 */ 1105 if (model == DATAMODEL_ILP32) { 1106 struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32; 1107 ssize_t iov32size; 1108 ssize32_t count32; 1109 1110 iov32size = iovcnt * sizeof (struct iovec32); 1111 if (iovsize != 0) 1112 aiov32 = kmem_alloc(iov32size, KM_SLEEP); 1113 1114 if (copyin((struct iovec32 *)lmsg.msg_iov, aiov32, iov32size)) { 1115 if (iovsize != 0) { 1116 kmem_free(aiov32, iov32size); 1117 kmem_free(aiov, iovsize); 1118 } 1119 1120 return (set_errno(EFAULT)); 1121 } 1122 1123 count32 = 0; 1124 for (i = 0; i < iovcnt; i++) { 1125 ssize32_t iovlen32; 1126 1127 iovlen32 = aiov32[i].iov_len; 1128 count32 += iovlen32; 1129 if (iovlen32 < 0 || count32 < 0) { 1130 if (iovsize != 0) { 1131 kmem_free(aiov32, iov32size); 1132 kmem_free(aiov, iovsize); 1133 } 1134 1135 return (set_errno(EINVAL)); 1136 } 1137 1138 aiov[i].iov_len = iovlen32; 1139 aiov[i].iov_base = 1140 (caddr_t)(uintptr_t)aiov32[i].iov_base; 1141 } 1142 1143 if (iovsize != 0) 1144 kmem_free(aiov32, iov32size); 1145 } else 1146 #endif /* _SYSCALL32_IMPL */ 1147 if (copyin(lmsg.msg_iov, aiov, iovcnt * sizeof (struct iovec))) { 1148 if (iovsize != 0) 1149 kmem_free(aiov, iovsize); 1150 1151 return (set_errno(EFAULT)); 1152 } 1153 len = 0; 1154 for (i = 0; i < iovcnt; i++) { 1155 ssize_t iovlen = aiov[i].iov_len; 1156 len += iovlen; 1157 if (iovlen < 0 || len < 0) { 1158 if (iovsize != 0) 1159 kmem_free(aiov, iovsize); 1160 1161 return (set_errno(EINVAL)); 1162 } 1163 } 1164 auio.uio_loffset = 0; 1165 auio.uio_iov = aiov; 1166 auio.uio_iovcnt = iovcnt; 1167 auio.uio_resid = len; 1168 auio.uio_segflg = UIO_USERSPACE; 1169 auio.uio_limit = 0; 1170 1171 if (lmsg.msg_control != NULL && 1172 (do_useracc == 0 || 1173 useracc(lmsg.msg_control, lmsg.msg_controllen, 1174 B_WRITE) != 0)) { 1175 if (iovsize != 0) 1176 kmem_free(aiov, iovsize); 1177 1178 return (set_errno(EFAULT)); 1179 } 1180 1181 rval = recvit(sock, &lmsg, &auio, flags, 1182 STRUCT_FADDR(umsgptr, msg_namelen), 1183 STRUCT_FADDR(umsgptr, msg_controllen), flagsp); 1184 1185 if (iovsize != 0) 1186 kmem_free(aiov, iovsize); 1187 1188 return (rval); 1189 } 1190 1191 /* 1192 * Common send function. 1193 */ 1194 static ssize_t 1195 sendit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags) 1196 { 1197 struct sonode *so; 1198 file_t *fp; 1199 void *name; 1200 socklen_t namelen; 1201 void *control; 1202 socklen_t controllen; 1203 ssize_t len; 1204 int error; 1205 1206 if ((so = getsonode(sock, &error, &fp)) == NULL) 1207 return (set_errno(error)); 1208 1209 uiop->uio_fmode = fp->f_flag; 1210 1211 if (so->so_family == AF_UNIX) 1212 uiop->uio_extflg = UIO_COPY_CACHED; 1213 else 1214 uiop->uio_extflg = UIO_COPY_DEFAULT; 1215 1216 /* Allocate and copyin name and control */ 1217 name = msg->msg_name; 1218 namelen = msg->msg_namelen; 1219 if (name != NULL && namelen != 0) { 1220 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1221 name = copyin_name(so, 1222 (struct sockaddr *)name, 1223 &namelen, &error); 1224 if (name == NULL) 1225 goto done3; 1226 /* copyin_name null terminates addresses for AF_UNIX */ 1227 msg->msg_namelen = namelen; 1228 msg->msg_name = name; 1229 } else { 1230 msg->msg_name = name = NULL; 1231 msg->msg_namelen = namelen = 0; 1232 } 1233 1234 control = msg->msg_control; 1235 controllen = msg->msg_controllen; 1236 if ((control != NULL) && (controllen != 0)) { 1237 /* 1238 * Verify that the length is not excessive to prevent 1239 * an application from consuming all of kernel memory. 1240 */ 1241 if (controllen > SO_MAXARGSIZE) { 1242 error = EINVAL; 1243 goto done2; 1244 } 1245 control = kmem_alloc(controllen, KM_SLEEP); 1246 1247 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1248 if (copyin(msg->msg_control, control, controllen)) { 1249 error = EFAULT; 1250 goto done1; 1251 } 1252 msg->msg_control = control; 1253 } else { 1254 msg->msg_control = control = NULL; 1255 msg->msg_controllen = controllen = 0; 1256 } 1257 1258 len = uiop->uio_resid; 1259 msg->msg_flags = flags; 1260 1261 error = socket_sendmsg(so, msg, uiop, CRED()); 1262 done1: 1263 if (control != NULL) 1264 kmem_free(control, controllen); 1265 done2: 1266 if (name != NULL) 1267 kmem_free(name, namelen); 1268 done3: 1269 if (error != 0) { 1270 releasef(sock); 1271 return (set_errno(error)); 1272 } 1273 lwp_stat_update(LWP_STAT_MSGSND, 1); 1274 releasef(sock); 1275 return (len - uiop->uio_resid); 1276 } 1277 1278 /* 1279 * Native system call 1280 */ 1281 ssize_t 1282 send(int sock, void *buffer, size_t len, int flags) 1283 { 1284 struct nmsghdr lmsg; 1285 struct uio auio; 1286 struct iovec aiov[1]; 1287 1288 dprint(1, ("send(%d, %p, %ld, %d)\n", 1289 sock, buffer, len, flags)); 1290 1291 if ((ssize_t)len < 0) { 1292 return (set_errno(EINVAL)); 1293 } 1294 1295 aiov[0].iov_base = buffer; 1296 aiov[0].iov_len = len; 1297 auio.uio_loffset = 0; 1298 auio.uio_iov = aiov; 1299 auio.uio_iovcnt = 1; 1300 auio.uio_resid = len; 1301 auio.uio_segflg = UIO_USERSPACE; 1302 auio.uio_limit = 0; 1303 1304 lmsg.msg_name = NULL; 1305 lmsg.msg_control = NULL; 1306 if (!(flags & MSG_XPG4_2)) { 1307 /* 1308 * In order to be compatible with the libsocket/sockmod 1309 * implementation we set EOR for all send* calls. 1310 */ 1311 flags |= MSG_EOR; 1312 } 1313 return (sendit(sock, &lmsg, &auio, flags)); 1314 } 1315 1316 /* 1317 * Uses the MSG_XPG4_2 flag to determine if the caller is using 1318 * struct omsghdr or struct nmsghdr. 1319 */ 1320 ssize_t 1321 sendmsg(int sock, struct nmsghdr *msg, int flags) 1322 { 1323 struct nmsghdr lmsg; 1324 STRUCT_DECL(nmsghdr, u_lmsg); 1325 struct uio auio; 1326 struct iovec buf[IOV_MAX_STACK], *aiov = buf; 1327 ssize_t iovsize = 0; 1328 int iovcnt; 1329 ssize_t len, rval; 1330 int i; 1331 model_t model; 1332 1333 dprint(1, ("sendmsg(%d, %p, %d)\n", sock, (void *)msg, flags)); 1334 1335 model = get_udatamodel(); 1336 STRUCT_INIT(u_lmsg, model); 1337 1338 if (flags & MSG_XPG4_2) { 1339 if (copyin(msg, (char *)STRUCT_BUF(u_lmsg), 1340 STRUCT_SIZE(u_lmsg))) 1341 return (set_errno(EFAULT)); 1342 } else { 1343 /* 1344 * Assumes that nmsghdr and omsghdr are identically shaped 1345 * except for the added msg_flags field. 1346 */ 1347 if (copyin(msg, (char *)STRUCT_BUF(u_lmsg), 1348 SIZEOF_STRUCT(omsghdr, model))) 1349 return (set_errno(EFAULT)); 1350 /* 1351 * In order to be compatible with the libsocket/sockmod 1352 * implementation we set EOR for all send* calls. 1353 */ 1354 flags |= MSG_EOR; 1355 } 1356 1357 /* 1358 * Code below us will kmem_alloc memory and hang it 1359 * off msg_control and msg_name fields. This forces 1360 * us to copy the structure to its native form. 1361 */ 1362 lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name); 1363 lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen); 1364 lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov); 1365 lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen); 1366 lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control); 1367 lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen); 1368 lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags); 1369 1370 iovcnt = lmsg.msg_iovlen; 1371 1372 if (iovcnt <= 0 || iovcnt > IOV_MAX) { 1373 /* 1374 * Unless this is XPG 4.2 we allow iovcnt == 0 to 1375 * be compatible with SunOS 4.X and 4.4BSD. 1376 */ 1377 if (iovcnt != 0 || (flags & MSG_XPG4_2)) 1378 return (set_errno(EMSGSIZE)); 1379 } 1380 1381 if (iovcnt > IOV_MAX_STACK) { 1382 iovsize = iovcnt * sizeof (struct iovec); 1383 aiov = kmem_alloc(iovsize, KM_SLEEP); 1384 } 1385 1386 #ifdef _SYSCALL32_IMPL 1387 /* 1388 * 32-bit callers need to have their iovec expanded, while ensuring 1389 * that they can't move more than 2Gbytes of data in a single call. 1390 */ 1391 if (model == DATAMODEL_ILP32) { 1392 struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32; 1393 ssize_t iov32size; 1394 ssize32_t count32; 1395 1396 iov32size = iovcnt * sizeof (struct iovec32); 1397 if (iovsize != 0) 1398 aiov32 = kmem_alloc(iov32size, KM_SLEEP); 1399 1400 if (iovcnt != 0 && 1401 copyin((struct iovec32 *)lmsg.msg_iov, aiov32, iov32size)) { 1402 if (iovsize != 0) { 1403 kmem_free(aiov32, iov32size); 1404 kmem_free(aiov, iovsize); 1405 } 1406 1407 return (set_errno(EFAULT)); 1408 } 1409 1410 count32 = 0; 1411 for (i = 0; i < iovcnt; i++) { 1412 ssize32_t iovlen32; 1413 1414 iovlen32 = aiov32[i].iov_len; 1415 count32 += iovlen32; 1416 if (iovlen32 < 0 || count32 < 0) { 1417 if (iovsize != 0) { 1418 kmem_free(aiov32, iov32size); 1419 kmem_free(aiov, iovsize); 1420 } 1421 1422 return (set_errno(EINVAL)); 1423 } 1424 1425 aiov[i].iov_len = iovlen32; 1426 aiov[i].iov_base = 1427 (caddr_t)(uintptr_t)aiov32[i].iov_base; 1428 } 1429 1430 if (iovsize != 0) 1431 kmem_free(aiov32, iov32size); 1432 } else 1433 #endif /* _SYSCALL32_IMPL */ 1434 if (iovcnt != 0 && 1435 copyin(lmsg.msg_iov, aiov, 1436 (unsigned)iovcnt * sizeof (struct iovec))) { 1437 if (iovsize != 0) 1438 kmem_free(aiov, iovsize); 1439 1440 return (set_errno(EFAULT)); 1441 } 1442 len = 0; 1443 for (i = 0; i < iovcnt; i++) { 1444 ssize_t iovlen = aiov[i].iov_len; 1445 len += iovlen; 1446 if (iovlen < 0 || len < 0) { 1447 if (iovsize != 0) 1448 kmem_free(aiov, iovsize); 1449 1450 return (set_errno(EINVAL)); 1451 } 1452 } 1453 auio.uio_loffset = 0; 1454 auio.uio_iov = aiov; 1455 auio.uio_iovcnt = iovcnt; 1456 auio.uio_resid = len; 1457 auio.uio_segflg = UIO_USERSPACE; 1458 auio.uio_limit = 0; 1459 1460 rval = sendit(sock, &lmsg, &auio, flags); 1461 1462 if (iovsize != 0) 1463 kmem_free(aiov, iovsize); 1464 1465 return (rval); 1466 } 1467 1468 ssize_t 1469 sendto(int sock, void *buffer, size_t len, int flags, 1470 struct sockaddr *name, socklen_t namelen) 1471 { 1472 struct nmsghdr lmsg; 1473 struct uio auio; 1474 struct iovec aiov[1]; 1475 1476 dprint(1, ("sendto(%d, %p, %ld, %d, %p, %d)\n", 1477 sock, buffer, len, flags, (void *)name, namelen)); 1478 1479 if ((ssize_t)len < 0) { 1480 return (set_errno(EINVAL)); 1481 } 1482 1483 aiov[0].iov_base = buffer; 1484 aiov[0].iov_len = len; 1485 auio.uio_loffset = 0; 1486 auio.uio_iov = aiov; 1487 auio.uio_iovcnt = 1; 1488 auio.uio_resid = len; 1489 auio.uio_segflg = UIO_USERSPACE; 1490 auio.uio_limit = 0; 1491 1492 lmsg.msg_name = (char *)name; 1493 lmsg.msg_namelen = namelen; 1494 lmsg.msg_control = NULL; 1495 if (!(flags & MSG_XPG4_2)) { 1496 /* 1497 * In order to be compatible with the libsocket/sockmod 1498 * implementation we set EOR for all send* calls. 1499 */ 1500 flags |= MSG_EOR; 1501 } 1502 return (sendit(sock, &lmsg, &auio, flags)); 1503 } 1504 1505 /*ARGSUSED3*/ 1506 int 1507 getpeername(int sock, struct sockaddr *name, socklen_t *namelenp, int version) 1508 { 1509 struct sonode *so; 1510 int error; 1511 socklen_t namelen; 1512 socklen_t sock_addrlen; 1513 struct sockaddr *sock_addrp; 1514 1515 dprint(1, ("getpeername(%d, %p, %p)\n", 1516 sock, (void *)name, (void *)namelenp)); 1517 1518 if ((so = getsonode(sock, &error, NULL)) == NULL) 1519 goto bad; 1520 1521 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1522 if (copyin(namelenp, &namelen, sizeof (namelen)) || 1523 (name == NULL && namelen != 0)) { 1524 error = EFAULT; 1525 goto rel_out; 1526 } 1527 sock_addrlen = so->so_max_addr_len; 1528 sock_addrp = (struct sockaddr *)kmem_alloc(sock_addrlen, KM_SLEEP); 1529 1530 if ((error = socket_getpeername(so, sock_addrp, &sock_addrlen, 1531 B_FALSE, CRED())) == 0) { 1532 ASSERT(sock_addrlen <= so->so_max_addr_len); 1533 error = copyout_name(name, namelen, namelenp, 1534 (void *)sock_addrp, sock_addrlen); 1535 } 1536 kmem_free(sock_addrp, so->so_max_addr_len); 1537 rel_out: 1538 releasef(sock); 1539 bad: return (error != 0 ? set_errno(error) : 0); 1540 } 1541 1542 /*ARGSUSED3*/ 1543 int 1544 getsockname(int sock, struct sockaddr *name, socklen_t *namelenp, int version) 1545 { 1546 struct sonode *so; 1547 int error; 1548 socklen_t namelen, sock_addrlen; 1549 struct sockaddr *sock_addrp; 1550 1551 dprint(1, ("getsockname(%d, %p, %p)\n", 1552 sock, (void *)name, (void *)namelenp)); 1553 1554 if ((so = getsonode(sock, &error, NULL)) == NULL) 1555 goto bad; 1556 1557 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1558 if (copyin(namelenp, &namelen, sizeof (namelen)) || 1559 (name == NULL && namelen != 0)) { 1560 error = EFAULT; 1561 goto rel_out; 1562 } 1563 1564 sock_addrlen = so->so_max_addr_len; 1565 sock_addrp = (struct sockaddr *)kmem_alloc(sock_addrlen, KM_SLEEP); 1566 if ((error = socket_getsockname(so, sock_addrp, &sock_addrlen, 1567 CRED())) == 0) { 1568 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1569 ASSERT(sock_addrlen <= so->so_max_addr_len); 1570 error = copyout_name(name, namelen, namelenp, 1571 (void *)sock_addrp, sock_addrlen); 1572 } 1573 kmem_free(sock_addrp, so->so_max_addr_len); 1574 rel_out: 1575 releasef(sock); 1576 bad: return (error != 0 ? set_errno(error) : 0); 1577 } 1578 1579 /*ARGSUSED5*/ 1580 int 1581 getsockopt(int sock, int level, int option_name, void *option_value, 1582 socklen_t *option_lenp, int version) 1583 { 1584 struct sonode *so; 1585 socklen_t optlen, optlen_res; 1586 void *optval; 1587 int error; 1588 1589 dprint(1, ("getsockopt(%d, %d, %d, %p, %p)\n", 1590 sock, level, option_name, option_value, (void *)option_lenp)); 1591 1592 if ((so = getsonode(sock, &error, NULL)) == NULL) 1593 return (set_errno(error)); 1594 1595 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1596 if (copyin(option_lenp, &optlen, sizeof (optlen))) { 1597 releasef(sock); 1598 return (set_errno(EFAULT)); 1599 } 1600 /* 1601 * Verify that the length is not excessive to prevent 1602 * an application from consuming all of kernel memory. 1603 */ 1604 if (optlen > SO_MAXARGSIZE) { 1605 error = EINVAL; 1606 releasef(sock); 1607 return (set_errno(error)); 1608 } 1609 optval = kmem_alloc(optlen, KM_SLEEP); 1610 optlen_res = optlen; 1611 error = socket_getsockopt(so, level, option_name, optval, 1612 &optlen_res, (version != SOV_XPG4_2) ? 0 : _SOGETSOCKOPT_XPG4_2, 1613 CRED()); 1614 releasef(sock); 1615 if (error) { 1616 kmem_free(optval, optlen); 1617 return (set_errno(error)); 1618 } 1619 error = copyout_arg(option_value, optlen, option_lenp, 1620 optval, optlen_res); 1621 kmem_free(optval, optlen); 1622 if (error) 1623 return (set_errno(error)); 1624 return (0); 1625 } 1626 1627 /*ARGSUSED5*/ 1628 int 1629 setsockopt(int sock, int level, int option_name, void *option_value, 1630 socklen_t option_len, int version) 1631 { 1632 struct sonode *so; 1633 intptr_t buffer[2]; 1634 void *optval = NULL; 1635 int error; 1636 1637 dprint(1, ("setsockopt(%d, %d, %d, %p, %d)\n", 1638 sock, level, option_name, option_value, option_len)); 1639 1640 if ((so = getsonode(sock, &error, NULL)) == NULL) 1641 return (set_errno(error)); 1642 1643 if (option_value != NULL) { 1644 if (option_len != 0) { 1645 /* 1646 * Verify that the length is not excessive to prevent 1647 * an application from consuming all of kernel memory. 1648 */ 1649 if (option_len > SO_MAXARGSIZE) { 1650 error = EINVAL; 1651 goto done2; 1652 } 1653 optval = option_len <= sizeof (buffer) ? 1654 &buffer : kmem_alloc((size_t)option_len, KM_SLEEP); 1655 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1656 if (copyin(option_value, optval, (size_t)option_len)) { 1657 error = EFAULT; 1658 goto done1; 1659 } 1660 } 1661 } else 1662 option_len = 0; 1663 1664 error = socket_setsockopt(so, level, option_name, optval, 1665 (t_uscalar_t)option_len, CRED()); 1666 done1: 1667 if (optval != buffer) 1668 kmem_free(optval, (size_t)option_len); 1669 done2: 1670 releasef(sock); 1671 if (error) 1672 return (set_errno(error)); 1673 return (0); 1674 } 1675 1676 static int 1677 sockconf_add_sock(int family, int type, int protocol, char *name) 1678 { 1679 int error = 0; 1680 char *kdevpath = NULL; 1681 char *kmodule = NULL; 1682 char *buf = NULL; 1683 size_t pathlen = 0; 1684 struct sockparams *sp; 1685 1686 if (name == NULL) 1687 return (EINVAL); 1688 /* 1689 * Copyin the name. 1690 * This also makes it possible to check for too long pathnames. 1691 * Compress the space needed for the name before passing it 1692 * to soconfig - soconfig will store the string until 1693 * the configuration is removed. 1694 */ 1695 buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 1696 if ((error = copyinstr(name, buf, MAXPATHLEN, &pathlen)) != 0) { 1697 kmem_free(buf, MAXPATHLEN); 1698 return (error); 1699 } 1700 if (strncmp(buf, "/dev", strlen("/dev")) == 0) { 1701 /* For device */ 1702 kdevpath = kmem_alloc(pathlen, KM_SLEEP); 1703 bcopy(buf, kdevpath, pathlen); 1704 kdevpath[pathlen - 1] = '\0'; 1705 } else { 1706 /* For socket module */ 1707 kmodule = kmem_alloc(pathlen, KM_SLEEP); 1708 bcopy(buf, kmodule, pathlen); 1709 kmodule[pathlen - 1] = '\0'; 1710 pathlen = 0; 1711 } 1712 kmem_free(buf, MAXPATHLEN); 1713 1714 /* sockparams_create frees mod name and devpath upon failure */ 1715 sp = sockparams_create(family, type, protocol, kmodule, 1716 kdevpath, pathlen, 0, KM_SLEEP, &error); 1717 if (sp != NULL) { 1718 error = sockparams_add(sp); 1719 if (error != 0) 1720 sockparams_destroy(sp); 1721 } 1722 1723 return (error); 1724 } 1725 1726 static int 1727 sockconf_remove_sock(int family, int type, int protocol) 1728 { 1729 return (sockparams_delete(family, type, protocol)); 1730 } 1731 1732 static int 1733 sockconfig_remove_filter(const char *uname) 1734 { 1735 char kname[SOF_MAXNAMELEN]; 1736 size_t len; 1737 int error; 1738 sof_entry_t *ent; 1739 1740 if ((error = copyinstr(uname, kname, SOF_MAXNAMELEN, &len)) != 0) 1741 return (error); 1742 1743 ent = sof_entry_remove_by_name(kname); 1744 if (ent == NULL) 1745 return (ENXIO); 1746 1747 mutex_enter(&ent->sofe_lock); 1748 ASSERT(!(ent->sofe_flags & SOFEF_CONDEMED)); 1749 if (ent->sofe_refcnt == 0) { 1750 mutex_exit(&ent->sofe_lock); 1751 sof_entry_free(ent); 1752 } else { 1753 /* let the last socket free the filter */ 1754 ent->sofe_flags |= SOFEF_CONDEMED; 1755 mutex_exit(&ent->sofe_lock); 1756 } 1757 1758 return (0); 1759 } 1760 1761 static int 1762 sockconfig_add_filter(const char *uname, void *ufilpropp) 1763 { 1764 struct sockconfig_filter_props filprop; 1765 sof_entry_t *ent; 1766 int error; 1767 size_t tuplesz, len; 1768 char hintbuf[SOF_MAXNAMELEN]; 1769 1770 ent = kmem_zalloc(sizeof (sof_entry_t), KM_SLEEP); 1771 mutex_init(&ent->sofe_lock, NULL, MUTEX_DEFAULT, NULL); 1772 1773 if ((error = copyinstr(uname, ent->sofe_name, SOF_MAXNAMELEN, 1774 &len)) != 0) { 1775 sof_entry_free(ent); 1776 return (error); 1777 } 1778 1779 if (get_udatamodel() == DATAMODEL_NATIVE) { 1780 if (copyin(ufilpropp, &filprop, sizeof (filprop)) != 0) { 1781 sof_entry_free(ent); 1782 return (EFAULT); 1783 } 1784 } 1785 #ifdef _SYSCALL32_IMPL 1786 else { 1787 struct sockconfig_filter_props32 filprop32; 1788 1789 if (copyin(ufilpropp, &filprop32, sizeof (filprop32)) != 0) { 1790 sof_entry_free(ent); 1791 return (EFAULT); 1792 } 1793 filprop.sfp_modname = (char *)(uintptr_t)filprop32.sfp_modname; 1794 filprop.sfp_autoattach = filprop32.sfp_autoattach; 1795 filprop.sfp_hint = filprop32.sfp_hint; 1796 filprop.sfp_hintarg = (char *)(uintptr_t)filprop32.sfp_hintarg; 1797 filprop.sfp_socktuple_cnt = filprop32.sfp_socktuple_cnt; 1798 filprop.sfp_socktuple = 1799 (sof_socktuple_t *)(uintptr_t)filprop32.sfp_socktuple; 1800 } 1801 #endif /* _SYSCALL32_IMPL */ 1802 1803 if ((error = copyinstr(filprop.sfp_modname, ent->sofe_modname, 1804 sizeof (ent->sofe_modname), &len)) != 0) { 1805 sof_entry_free(ent); 1806 return (error); 1807 } 1808 1809 /* 1810 * A filter must specify at least one socket tuple. 1811 */ 1812 if (filprop.sfp_socktuple_cnt == 0 || 1813 filprop.sfp_socktuple_cnt > SOF_MAXSOCKTUPLECNT) { 1814 sof_entry_free(ent); 1815 return (EINVAL); 1816 } 1817 ent->sofe_flags = filprop.sfp_autoattach ? SOFEF_AUTO : SOFEF_PROG; 1818 ent->sofe_hint = filprop.sfp_hint; 1819 1820 /* 1821 * Verify the hint, and copy in the hint argument, if necessary. 1822 */ 1823 switch (ent->sofe_hint) { 1824 case SOF_HINT_BEFORE: 1825 case SOF_HINT_AFTER: 1826 if ((error = copyinstr(filprop.sfp_hintarg, hintbuf, 1827 sizeof (hintbuf), &len)) != 0) { 1828 sof_entry_free(ent); 1829 return (error); 1830 } 1831 ent->sofe_hintarg = kmem_alloc(len, KM_SLEEP); 1832 bcopy(hintbuf, ent->sofe_hintarg, len); 1833 /* FALLTHRU */ 1834 case SOF_HINT_TOP: 1835 case SOF_HINT_BOTTOM: 1836 /* hints cannot be used with programmatic filters */ 1837 if (ent->sofe_flags & SOFEF_PROG) { 1838 sof_entry_free(ent); 1839 return (EINVAL); 1840 } 1841 break; 1842 case SOF_HINT_NONE: 1843 break; 1844 default: 1845 /* bad hint value */ 1846 sof_entry_free(ent); 1847 return (EINVAL); 1848 } 1849 1850 ent->sofe_socktuple_cnt = filprop.sfp_socktuple_cnt; 1851 tuplesz = sizeof (sof_socktuple_t) * ent->sofe_socktuple_cnt; 1852 ent->sofe_socktuple = kmem_alloc(tuplesz, KM_SLEEP); 1853 1854 if (get_udatamodel() == DATAMODEL_NATIVE) { 1855 if (copyin(filprop.sfp_socktuple, ent->sofe_socktuple, 1856 tuplesz)) { 1857 sof_entry_free(ent); 1858 return (EFAULT); 1859 } 1860 } 1861 #ifdef _SYSCALL32_IMPL 1862 else { 1863 int i; 1864 caddr_t data = (caddr_t)filprop.sfp_socktuple; 1865 sof_socktuple_t *tup = ent->sofe_socktuple; 1866 sof_socktuple32_t tup32; 1867 1868 tup = ent->sofe_socktuple; 1869 for (i = 0; i < ent->sofe_socktuple_cnt; i++, tup++) { 1870 ASSERT(tup < ent->sofe_socktuple + tuplesz); 1871 1872 if (copyin(data, &tup32, sizeof (tup32)) != 0) { 1873 sof_entry_free(ent); 1874 return (EFAULT); 1875 } 1876 tup->sofst_family = tup32.sofst_family; 1877 tup->sofst_type = tup32.sofst_type; 1878 tup->sofst_protocol = tup32.sofst_protocol; 1879 1880 data += sizeof (tup32); 1881 } 1882 } 1883 #endif /* _SYSCALL32_IMPL */ 1884 1885 /* Sockets can start using the filter as soon as the filter is added */ 1886 if ((error = sof_entry_add(ent)) != 0) 1887 sof_entry_free(ent); 1888 1889 return (error); 1890 } 1891 1892 /* 1893 * Socket configuration system call. It is used to add and remove 1894 * socket types. 1895 */ 1896 int 1897 sockconfig(int cmd, void *arg1, void *arg2, void *arg3, void *arg4) 1898 { 1899 int error = 0; 1900 1901 if (secpolicy_net_config(CRED(), B_FALSE) != 0) 1902 return (set_errno(EPERM)); 1903 1904 switch (cmd) { 1905 case SOCKCONFIG_ADD_SOCK: 1906 error = sockconf_add_sock((int)(uintptr_t)arg1, 1907 (int)(uintptr_t)arg2, (int)(uintptr_t)arg3, arg4); 1908 break; 1909 case SOCKCONFIG_REMOVE_SOCK: 1910 error = sockconf_remove_sock((int)(uintptr_t)arg1, 1911 (int)(uintptr_t)arg2, (int)(uintptr_t)arg3); 1912 break; 1913 case SOCKCONFIG_ADD_FILTER: 1914 error = sockconfig_add_filter((const char *)arg1, arg2); 1915 break; 1916 case SOCKCONFIG_REMOVE_FILTER: 1917 error = sockconfig_remove_filter((const char *)arg1); 1918 break; 1919 case SOCKCONFIG_GET_SOCKTABLE: 1920 error = sockparams_copyout_socktable((int)(uintptr_t)arg1); 1921 break; 1922 default: 1923 #ifdef DEBUG 1924 cmn_err(CE_NOTE, "sockconfig: unkonwn subcommand %d", cmd); 1925 #endif 1926 error = EINVAL; 1927 break; 1928 } 1929 1930 if (error != 0) { 1931 eprintline(error); 1932 return (set_errno(error)); 1933 } 1934 return (0); 1935 } 1936 1937 1938 /* 1939 * Sendfile is implemented through two schemes, direct I/O or by 1940 * caching in the filesystem page cache. We cache the input file by 1941 * default and use direct I/O only if sendfile_max_size is set 1942 * appropriately as explained below. Note that this logic is consistent 1943 * with other filesystems where caching is turned on by default 1944 * unless explicitly turned off by using the DIRECTIO ioctl. 1945 * 1946 * We choose a slightly different scheme here. One can turn off 1947 * caching by setting sendfile_max_size to 0. One can also enable 1948 * caching of files <= sendfile_max_size by setting sendfile_max_size 1949 * to an appropriate value. By default sendfile_max_size is set to the 1950 * maximum value so that all files are cached. In future, we may provide 1951 * better interfaces for caching the file. 1952 * 1953 * Sendfile through Direct I/O (Zero copy) 1954 * -------------------------------------- 1955 * 1956 * As disks are normally slower than the network, we can't have a 1957 * single thread that reads the disk and writes to the network. We 1958 * need to have parallelism. This is done by having the sendfile 1959 * thread create another thread that reads from the filesystem 1960 * and queues it for network processing. In this scheme, the data 1961 * is never copied anywhere i.e it is zero copy unlike the other 1962 * scheme. 1963 * 1964 * We have a sendfile queue (snfq) where each sendfile 1965 * request (snf_req_t) is queued for processing by a thread. Number 1966 * of threads is dynamically allocated and they exit if they are idling 1967 * beyond a specified amount of time. When each request (snf_req_t) is 1968 * processed by a thread, it produces a number of mblk_t structures to 1969 * be consumed by the sendfile thread. snf_deque and snf_enque are 1970 * used for consuming and producing mblks. Size of the filesystem 1971 * read is determined by the tunable (sendfile_read_size). A single 1972 * mblk holds sendfile_read_size worth of data (except the last 1973 * read of the file) which is sent down as a whole to the network. 1974 * sendfile_read_size is set to 1 MB as this seems to be the optimal 1975 * value for the UFS filesystem backed by a striped storage array. 1976 * 1977 * Synchronisation between read (producer) and write (consumer) threads. 1978 * -------------------------------------------------------------------- 1979 * 1980 * sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while 1981 * adding and deleting items in this list. Error can happen anytime 1982 * during read or write. There could be unprocessed mblks in the 1983 * sr_ib_XXX list when a read or write error occurs. Whenever error 1984 * is encountered, we need two things to happen : 1985 * 1986 * a) One of the threads need to clean the mblks. 1987 * b) When one thread encounters an error, the other should stop. 1988 * 1989 * For (a), we don't want to penalize the reader thread as it could do 1990 * some useful work processing other requests. For (b), the error can 1991 * be detected by examining sr_read_error or sr_write_error. 1992 * sr_lock protects sr_read_error and sr_write_error. If both reader and 1993 * writer encounters error, we need to report the write error back to 1994 * the application as that's what would have happened if the operations 1995 * were done sequentially. With this in mind, following should work : 1996 * 1997 * - Check for errors before read or write. 1998 * - If the reader encounters error, set the error in sr_read_error. 1999 * Check sr_write_error, if it is set, send cv_signal as it is 2000 * waiting for reader to complete. If it is not set, the writer 2001 * is either running sinking data to the network or blocked 2002 * because of flow control. For handling the latter case, we 2003 * always send a signal. In any case, it will examine sr_read_error 2004 * and return. sr_read_error is marked with SR_READ_DONE to tell 2005 * the writer that the reader is done in all the cases. 2006 * - If the writer encounters error, set the error in sr_write_error. 2007 * The reader thread is either blocked because of flow control or 2008 * running reading data from the disk. For the former, we need to 2009 * wakeup the thread. Again to keep it simple, we always wake up 2010 * the reader thread. Then, wait for the read thread to complete 2011 * if it is not done yet. Cleanup and return. 2012 * 2013 * High and low water marks for the read thread. 2014 * -------------------------------------------- 2015 * 2016 * If sendfile() is used to send data over a slow network, we need to 2017 * make sure that the read thread does not produce data at a faster 2018 * rate than the network. This can happen if the disk is faster than 2019 * the network. In such a case, we don't want to build a very large queue. 2020 * But we would still like to get all of the network throughput possible. 2021 * This implies that network should never block waiting for data. 2022 * As there are lot of disk throughput/network throughput combinations 2023 * possible, it is difficult to come up with an accurate number. 2024 * A typical 10K RPM disk has a max seek latency 17ms and rotational 2025 * latency of 3ms for reading a disk block. Thus, the total latency to 2026 * initiate a new read, transfer data from the disk and queue for 2027 * transmission would take about a max of 25ms. Todays max transfer rate 2028 * for network is 100MB/sec. If the thread is blocked because of flow 2029 * control, it would take 25ms to get new data ready for transmission. 2030 * We have to make sure that network is not idling, while we are initiating 2031 * new transfers. So, at 100MB/sec, to keep network busy we would need 2032 * 2.5MB of data. Rounding off, we keep the low water mark to be 3MB of data. 2033 * We need to pick a high water mark so that the woken up thread would 2034 * do considerable work before blocking again to prevent thrashing. Currently, 2035 * we pick this to be 10 times that of the low water mark. 2036 * 2037 * Sendfile with segmap caching (One copy from page cache to mblks). 2038 * ---------------------------------------------------------------- 2039 * 2040 * We use the segmap cache for caching the file, if the size of file 2041 * is <= sendfile_max_size. In this case we don't use threads as VM 2042 * is reasonably fast enough to keep up with the network. If the underlying 2043 * transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth 2044 * of data into segmap space, and use the virtual address from segmap 2045 * directly through desballoc() to avoid copy. Once the transport is done 2046 * with the data, the mapping will be released through segmap_release() 2047 * called by the call-back routine. 2048 * 2049 * If zero-copy is not allowed by the transport, we simply call VOP_READ() 2050 * to copy the data from the filesystem into our temporary network buffer. 2051 * 2052 * To disable caching, set sendfile_max_size to 0. 2053 */ 2054 2055 uint_t sendfile_read_size = 1024 * 1024; 2056 #define SENDFILE_REQ_LOWAT 3 * 1024 * 1024 2057 uint_t sendfile_req_lowat = SENDFILE_REQ_LOWAT; 2058 uint_t sendfile_req_hiwat = 10 * SENDFILE_REQ_LOWAT; 2059 struct sendfile_stats sf_stats; 2060 struct sendfile_queue *snfq; 2061 clock_t snfq_timeout; 2062 off64_t sendfile_max_size; 2063 2064 static void snf_enque(snf_req_t *, mblk_t *); 2065 static mblk_t *snf_deque(snf_req_t *); 2066 2067 void 2068 sendfile_init(void) 2069 { 2070 snfq = kmem_zalloc(sizeof (struct sendfile_queue), KM_SLEEP); 2071 2072 mutex_init(&snfq->snfq_lock, NULL, MUTEX_DEFAULT, NULL); 2073 cv_init(&snfq->snfq_cv, NULL, CV_DEFAULT, NULL); 2074 snfq->snfq_max_threads = max_ncpus; 2075 snfq_timeout = SNFQ_TIMEOUT; 2076 /* Cache all files by default. */ 2077 sendfile_max_size = MAXOFFSET_T; 2078 } 2079 2080 /* 2081 * Queues a mblk_t for network processing. 2082 */ 2083 static void 2084 snf_enque(snf_req_t *sr, mblk_t *mp) 2085 { 2086 mp->b_next = NULL; 2087 mutex_enter(&sr->sr_lock); 2088 if (sr->sr_mp_head == NULL) { 2089 sr->sr_mp_head = sr->sr_mp_tail = mp; 2090 cv_signal(&sr->sr_cv); 2091 } else { 2092 sr->sr_mp_tail->b_next = mp; 2093 sr->sr_mp_tail = mp; 2094 } 2095 sr->sr_qlen += MBLKL(mp); 2096 while ((sr->sr_qlen > sr->sr_hiwat) && 2097 (sr->sr_write_error == 0)) { 2098 sf_stats.ss_full_waits++; 2099 cv_wait(&sr->sr_cv, &sr->sr_lock); 2100 } 2101 mutex_exit(&sr->sr_lock); 2102 } 2103 2104 /* 2105 * De-queues a mblk_t for network processing. 2106 */ 2107 static mblk_t * 2108 snf_deque(snf_req_t *sr) 2109 { 2110 mblk_t *mp; 2111 2112 mutex_enter(&sr->sr_lock); 2113 /* 2114 * If we have encountered an error on read or read is 2115 * completed and no more mblks, return NULL. 2116 * We need to check for NULL sr_mp_head also as 2117 * the reads could have completed and there is 2118 * nothing more to come. 2119 */ 2120 if (((sr->sr_read_error & ~SR_READ_DONE) != 0) || 2121 ((sr->sr_read_error & SR_READ_DONE) && 2122 sr->sr_mp_head == NULL)) { 2123 mutex_exit(&sr->sr_lock); 2124 return (NULL); 2125 } 2126 /* 2127 * To start with neither SR_READ_DONE is marked nor 2128 * the error is set. When we wake up from cv_wait, 2129 * following are the possibilities : 2130 * 2131 * a) sr_read_error is zero and mblks are queued. 2132 * b) sr_read_error is set to SR_READ_DONE 2133 * and mblks are queued. 2134 * c) sr_read_error is set to SR_READ_DONE 2135 * and no mblks. 2136 * d) sr_read_error is set to some error other 2137 * than SR_READ_DONE. 2138 */ 2139 2140 while ((sr->sr_read_error == 0) && (sr->sr_mp_head == NULL)) { 2141 sf_stats.ss_empty_waits++; 2142 cv_wait(&sr->sr_cv, &sr->sr_lock); 2143 } 2144 /* Handle (a) and (b) first - the normal case. */ 2145 if (((sr->sr_read_error & ~SR_READ_DONE) == 0) && 2146 (sr->sr_mp_head != NULL)) { 2147 mp = sr->sr_mp_head; 2148 sr->sr_mp_head = mp->b_next; 2149 sr->sr_qlen -= MBLKL(mp); 2150 if (sr->sr_qlen < sr->sr_lowat) 2151 cv_signal(&sr->sr_cv); 2152 mutex_exit(&sr->sr_lock); 2153 mp->b_next = NULL; 2154 return (mp); 2155 } 2156 /* Handle (c) and (d). */ 2157 mutex_exit(&sr->sr_lock); 2158 return (NULL); 2159 } 2160 2161 /* 2162 * Reads data from the filesystem and queues it for network processing. 2163 */ 2164 void 2165 snf_async_read(snf_req_t *sr) 2166 { 2167 size_t iosize; 2168 u_offset_t fileoff; 2169 u_offset_t size; 2170 int ret_size; 2171 int error; 2172 file_t *fp; 2173 mblk_t *mp; 2174 struct vnode *vp; 2175 int extra = 0; 2176 int maxblk = 0; 2177 int wroff = 0; 2178 struct sonode *so; 2179 2180 fp = sr->sr_fp; 2181 size = sr->sr_file_size; 2182 fileoff = sr->sr_file_off; 2183 2184 /* 2185 * Ignore the error for filesystems that doesn't support DIRECTIO. 2186 */ 2187 (void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_ON, 0, 2188 kcred, NULL, NULL); 2189 2190 vp = sr->sr_vp; 2191 if (vp->v_type == VSOCK) { 2192 stdata_t *stp; 2193 2194 /* 2195 * Get the extra space to insert a header and a trailer. 2196 */ 2197 so = VTOSO(vp); 2198 stp = vp->v_stream; 2199 if (stp == NULL) { 2200 wroff = so->so_proto_props.sopp_wroff; 2201 maxblk = so->so_proto_props.sopp_maxblk; 2202 extra = wroff + so->so_proto_props.sopp_tail; 2203 } else { 2204 wroff = (int)(stp->sd_wroff); 2205 maxblk = (int)(stp->sd_maxblk); 2206 extra = wroff + (int)(stp->sd_tail); 2207 } 2208 } 2209 2210 while ((size != 0) && (sr->sr_write_error == 0)) { 2211 2212 iosize = (int)MIN(sr->sr_maxpsz, size); 2213 2214 /* 2215 * Socket filters can limit the mblk size, 2216 * so limit reads to maxblk if there are 2217 * filters present. 2218 */ 2219 if (vp->v_type == VSOCK && 2220 so->so_filter_active > 0 && maxblk != INFPSZ) 2221 iosize = (int)MIN(iosize, maxblk); 2222 2223 if (is_system_labeled()) { 2224 mp = allocb_cred(iosize + extra, CRED(), 2225 curproc->p_pid); 2226 } else { 2227 mp = allocb(iosize + extra, BPRI_MED); 2228 } 2229 if (mp == NULL) { 2230 error = EAGAIN; 2231 break; 2232 } 2233 2234 mp->b_rptr += wroff; 2235 2236 ret_size = soreadfile(fp, mp->b_rptr, fileoff, &error, iosize); 2237 2238 /* Error or Reached EOF ? */ 2239 if ((error != 0) || (ret_size == 0)) { 2240 freeb(mp); 2241 break; 2242 } 2243 mp->b_wptr = mp->b_rptr + ret_size; 2244 2245 snf_enque(sr, mp); 2246 size -= ret_size; 2247 fileoff += ret_size; 2248 } 2249 (void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_OFF, 0, 2250 kcred, NULL, NULL); 2251 mutex_enter(&sr->sr_lock); 2252 sr->sr_read_error = error; 2253 sr->sr_read_error |= SR_READ_DONE; 2254 cv_signal(&sr->sr_cv); 2255 mutex_exit(&sr->sr_lock); 2256 } 2257 2258 void 2259 snf_async_thread(void) 2260 { 2261 snf_req_t *sr; 2262 callb_cpr_t cprinfo; 2263 clock_t time_left = 1; 2264 2265 CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq"); 2266 2267 mutex_enter(&snfq->snfq_lock); 2268 for (;;) { 2269 /* 2270 * If we didn't find a entry, then block until woken up 2271 * again and then look through the queues again. 2272 */ 2273 while ((sr = snfq->snfq_req_head) == NULL) { 2274 CALLB_CPR_SAFE_BEGIN(&cprinfo); 2275 if (time_left <= 0) { 2276 snfq->snfq_svc_threads--; 2277 CALLB_CPR_EXIT(&cprinfo); 2278 thread_exit(); 2279 /* NOTREACHED */ 2280 } 2281 snfq->snfq_idle_cnt++; 2282 2283 time_left = cv_reltimedwait(&snfq->snfq_cv, 2284 &snfq->snfq_lock, snfq_timeout, TR_CLOCK_TICK); 2285 snfq->snfq_idle_cnt--; 2286 2287 CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock); 2288 } 2289 snfq->snfq_req_head = sr->sr_next; 2290 snfq->snfq_req_cnt--; 2291 mutex_exit(&snfq->snfq_lock); 2292 snf_async_read(sr); 2293 mutex_enter(&snfq->snfq_lock); 2294 } 2295 } 2296 2297 2298 snf_req_t * 2299 create_thread(int operation, struct vnode *vp, file_t *fp, 2300 u_offset_t fileoff, u_offset_t size) 2301 { 2302 snf_req_t *sr; 2303 stdata_t *stp; 2304 2305 sr = (snf_req_t *)kmem_zalloc(sizeof (snf_req_t), KM_SLEEP); 2306 2307 sr->sr_vp = vp; 2308 sr->sr_fp = fp; 2309 stp = vp->v_stream; 2310 2311 /* 2312 * store sd_qn_maxpsz into sr_maxpsz while we have stream head. 2313 * stream might be closed before thread returns from snf_async_read. 2314 */ 2315 if (stp != NULL && stp->sd_qn_maxpsz > 0) { 2316 sr->sr_maxpsz = MIN(MAXBSIZE, stp->sd_qn_maxpsz); 2317 } else { 2318 sr->sr_maxpsz = MAXBSIZE; 2319 } 2320 2321 sr->sr_operation = operation; 2322 sr->sr_file_off = fileoff; 2323 sr->sr_file_size = size; 2324 sr->sr_hiwat = sendfile_req_hiwat; 2325 sr->sr_lowat = sendfile_req_lowat; 2326 mutex_init(&sr->sr_lock, NULL, MUTEX_DEFAULT, NULL); 2327 cv_init(&sr->sr_cv, NULL, CV_DEFAULT, NULL); 2328 /* 2329 * See whether we need another thread for servicing this 2330 * request. If there are already enough requests queued 2331 * for the threads, create one if not exceeding 2332 * snfq_max_threads. 2333 */ 2334 mutex_enter(&snfq->snfq_lock); 2335 if (snfq->snfq_req_cnt >= snfq->snfq_idle_cnt && 2336 snfq->snfq_svc_threads < snfq->snfq_max_threads) { 2337 (void) thread_create(NULL, 0, &snf_async_thread, 0, 0, &p0, 2338 TS_RUN, minclsyspri); 2339 snfq->snfq_svc_threads++; 2340 } 2341 if (snfq->snfq_req_head == NULL) { 2342 snfq->snfq_req_head = snfq->snfq_req_tail = sr; 2343 cv_signal(&snfq->snfq_cv); 2344 } else { 2345 snfq->snfq_req_tail->sr_next = sr; 2346 snfq->snfq_req_tail = sr; 2347 } 2348 snfq->snfq_req_cnt++; 2349 mutex_exit(&snfq->snfq_lock); 2350 return (sr); 2351 } 2352 2353 int 2354 snf_direct_io(file_t *fp, file_t *rfp, u_offset_t fileoff, u_offset_t size, 2355 ssize_t *count) 2356 { 2357 snf_req_t *sr; 2358 mblk_t *mp; 2359 int iosize; 2360 int error = 0; 2361 short fflag; 2362 struct vnode *vp; 2363 int ksize; 2364 struct nmsghdr msg; 2365 2366 ksize = 0; 2367 *count = 0; 2368 bzero(&msg, sizeof (msg)); 2369 2370 vp = fp->f_vnode; 2371 fflag = fp->f_flag; 2372 if ((sr = create_thread(READ_OP, vp, rfp, fileoff, size)) == NULL) 2373 return (EAGAIN); 2374 2375 /* 2376 * We check for read error in snf_deque. It has to check 2377 * for successful READ_DONE and return NULL, and we might 2378 * as well make an additional check there. 2379 */ 2380 while ((mp = snf_deque(sr)) != NULL) { 2381 2382 if (ISSIG(curthread, JUSTLOOKING)) { 2383 freeb(mp); 2384 error = EINTR; 2385 break; 2386 } 2387 iosize = MBLKL(mp); 2388 2389 error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp); 2390 2391 if (error != 0) { 2392 if (mp != NULL) 2393 freeb(mp); 2394 break; 2395 } 2396 ksize += iosize; 2397 } 2398 *count = ksize; 2399 2400 mutex_enter(&sr->sr_lock); 2401 sr->sr_write_error = error; 2402 /* Look at the big comments on why we cv_signal here. */ 2403 cv_signal(&sr->sr_cv); 2404 2405 /* Wait for the reader to complete always. */ 2406 while (!(sr->sr_read_error & SR_READ_DONE)) { 2407 cv_wait(&sr->sr_cv, &sr->sr_lock); 2408 } 2409 /* If there is no write error, check for read error. */ 2410 if (error == 0) 2411 error = (sr->sr_read_error & ~SR_READ_DONE); 2412 2413 if (error != 0) { 2414 mblk_t *next_mp; 2415 2416 mp = sr->sr_mp_head; 2417 while (mp != NULL) { 2418 next_mp = mp->b_next; 2419 mp->b_next = NULL; 2420 freeb(mp); 2421 mp = next_mp; 2422 } 2423 } 2424 mutex_exit(&sr->sr_lock); 2425 kmem_free(sr, sizeof (snf_req_t)); 2426 return (error); 2427 } 2428 2429 /* Maximum no.of pages allocated by vpm for sendfile at a time */ 2430 #define SNF_VPMMAXPGS (VPMMAXPGS/2) 2431 2432 /* 2433 * Maximum no.of elements in the list returned by vpm, including 2434 * NULL for the last entry 2435 */ 2436 #define SNF_MAXVMAPS (SNF_VPMMAXPGS + 1) 2437 2438 typedef struct { 2439 unsigned int snfv_ref; 2440 frtn_t snfv_frtn; 2441 vnode_t *snfv_vp; 2442 struct vmap snfv_vml[SNF_MAXVMAPS]; 2443 } snf_vmap_desbinfo; 2444 2445 typedef struct { 2446 frtn_t snfi_frtn; 2447 caddr_t snfi_base; 2448 uint_t snfi_mapoff; 2449 size_t snfi_len; 2450 vnode_t *snfi_vp; 2451 } snf_smap_desbinfo; 2452 2453 /* 2454 * The callback function used for vpm mapped mblks called when the last ref of 2455 * the mblk is dropped which normally occurs when TCP receives the ack. But it 2456 * can be the driver too due to lazy reclaim. 2457 */ 2458 void 2459 snf_vmap_desbfree(snf_vmap_desbinfo *snfv) 2460 { 2461 ASSERT(snfv->snfv_ref != 0); 2462 if (atomic_dec_32_nv(&snfv->snfv_ref) == 0) { 2463 vpm_unmap_pages(snfv->snfv_vml, S_READ); 2464 VN_RELE(snfv->snfv_vp); 2465 kmem_free(snfv, sizeof (snf_vmap_desbinfo)); 2466 } 2467 } 2468 2469 /* 2470 * The callback function used for segmap'ped mblks called when the last ref of 2471 * the mblk is dropped which normally occurs when TCP receives the ack. But it 2472 * can be the driver too due to lazy reclaim. 2473 */ 2474 void 2475 snf_smap_desbfree(snf_smap_desbinfo *snfi) 2476 { 2477 if (! IS_KPM_ADDR(snfi->snfi_base)) { 2478 /* 2479 * We don't need to call segmap_fault(F_SOFTUNLOCK) for 2480 * segmap_kpm as long as the latter never falls back to 2481 * "use_segmap_range". (See segmap_getmapflt().) 2482 * 2483 * Using S_OTHER saves an redundant hat_setref() in 2484 * segmap_unlock() 2485 */ 2486 (void) segmap_fault(kas.a_hat, segkmap, 2487 (caddr_t)(uintptr_t)(((uintptr_t)snfi->snfi_base + 2488 snfi->snfi_mapoff) & PAGEMASK), snfi->snfi_len, 2489 F_SOFTUNLOCK, S_OTHER); 2490 } 2491 (void) segmap_release(segkmap, snfi->snfi_base, SM_DONTNEED); 2492 VN_RELE(snfi->snfi_vp); 2493 kmem_free(snfi, sizeof (*snfi)); 2494 } 2495 2496 /* 2497 * Use segmap or vpm instead of bcopy to send down a desballoca'ed, mblk. 2498 * When segmap is used, the mblk contains a segmap slot of no more 2499 * than MAXBSIZE. 2500 * 2501 * With vpm, a maximum of SNF_MAXVMAPS page-sized mappings can be obtained 2502 * in each iteration and sent by socket_sendmblk until an error occurs or 2503 * the requested size has been transferred. An mblk is esballoca'ed from 2504 * each mapped page and a chain of these mblk is sent to the transport layer. 2505 * vpm will be called to unmap the pages when all mblks have been freed by 2506 * free_func. 2507 * 2508 * At the end of the whole sendfile() operation, we wait till the data from 2509 * the last mblk is ack'ed by the transport before returning so that the 2510 * caller of sendfile() can safely modify the file content. 2511 * 2512 * The caller of this function should make sure that total_size does not exceed 2513 * the actual file size of fvp. 2514 */ 2515 int 2516 snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t total_size, 2517 ssize_t *count, boolean_t nowait) 2518 { 2519 caddr_t base; 2520 int mapoff; 2521 vnode_t *vp; 2522 mblk_t *mp = NULL; 2523 int chain_size; 2524 int error; 2525 clock_t deadlk_wait; 2526 short fflag; 2527 int ksize; 2528 struct vattr va; 2529 boolean_t dowait = B_FALSE; 2530 struct nmsghdr msg; 2531 2532 vp = fp->f_vnode; 2533 fflag = fp->f_flag; 2534 ksize = 0; 2535 bzero(&msg, sizeof (msg)); 2536 2537 for (;;) { 2538 if (ISSIG(curthread, JUSTLOOKING)) { 2539 error = EINTR; 2540 break; 2541 } 2542 2543 if (vpm_enable) { 2544 snf_vmap_desbinfo *snfv; 2545 mblk_t *nmp; 2546 int mblk_size; 2547 int maxsize; 2548 int i; 2549 2550 mapoff = fileoff & PAGEOFFSET; 2551 maxsize = MIN((SNF_VPMMAXPGS * PAGESIZE), total_size); 2552 2553 snfv = kmem_zalloc(sizeof (snf_vmap_desbinfo), 2554 KM_SLEEP); 2555 2556 /* 2557 * Get vpm mappings for maxsize with read access. 2558 * If the pages aren't available yet, we get 2559 * DEADLK, so wait and try again a little later using 2560 * an increasing wait. We might be here a long time. 2561 * 2562 * If delay_sig returns EINTR, be sure to exit and 2563 * pass it up to the caller. 2564 */ 2565 deadlk_wait = 0; 2566 while ((error = vpm_map_pages(fvp, fileoff, 2567 (size_t)maxsize, (VPM_FETCHPAGE), snfv->snfv_vml, 2568 SNF_MAXVMAPS, NULL, S_READ)) == EDEADLK) { 2569 deadlk_wait += (deadlk_wait < 5) ? 1 : 4; 2570 if ((error = delay_sig(deadlk_wait)) != 0) { 2571 break; 2572 } 2573 } 2574 if (error != 0) { 2575 kmem_free(snfv, sizeof (snf_vmap_desbinfo)); 2576 error = (error == EINTR) ? EINTR : EIO; 2577 goto out; 2578 } 2579 snfv->snfv_frtn.free_func = snf_vmap_desbfree; 2580 snfv->snfv_frtn.free_arg = (caddr_t)snfv; 2581 2582 /* Construct the mblk chain from the page mappings */ 2583 chain_size = 0; 2584 for (i = 0; (snfv->snfv_vml[i].vs_addr != NULL) && 2585 total_size > 0; i++) { 2586 ASSERT(chain_size < maxsize); 2587 mblk_size = MIN(snfv->snfv_vml[i].vs_len - 2588 mapoff, total_size); 2589 nmp = esballoca( 2590 (uchar_t *)snfv->snfv_vml[i].vs_addr + 2591 mapoff, mblk_size, BPRI_HI, 2592 &snfv->snfv_frtn); 2593 2594 /* 2595 * We return EAGAIN after unmapping the pages 2596 * if we cannot allocate the the head of the 2597 * chain. Otherwise, we continue sending the 2598 * mblks constructed so far. 2599 */ 2600 if (nmp == NULL) { 2601 if (i == 0) { 2602 vpm_unmap_pages(snfv->snfv_vml, 2603 S_READ); 2604 kmem_free(snfv, 2605 sizeof (snf_vmap_desbinfo)); 2606 error = EAGAIN; 2607 goto out; 2608 } 2609 break; 2610 } 2611 /* Mark this dblk with the zero-copy flag */ 2612 nmp->b_datap->db_struioflag |= STRUIO_ZC; 2613 nmp->b_wptr += mblk_size; 2614 chain_size += mblk_size; 2615 fileoff += mblk_size; 2616 total_size -= mblk_size; 2617 snfv->snfv_ref++; 2618 mapoff = 0; 2619 if (i > 0) 2620 linkb(mp, nmp); 2621 else 2622 mp = nmp; 2623 } 2624 VN_HOLD(fvp); 2625 snfv->snfv_vp = fvp; 2626 } else { 2627 /* vpm not supported. fallback to segmap */ 2628 snf_smap_desbinfo *snfi; 2629 2630 mapoff = fileoff & MAXBOFFSET; 2631 chain_size = MAXBSIZE - mapoff; 2632 if (chain_size > total_size) 2633 chain_size = total_size; 2634 /* 2635 * we don't forcefault because we'll call 2636 * segmap_fault(F_SOFTLOCK) next. 2637 * 2638 * S_READ will get the ref bit set (by either 2639 * segmap_getmapflt() or segmap_fault()) and page 2640 * shared locked. 2641 */ 2642 base = segmap_getmapflt(segkmap, fvp, fileoff, 2643 chain_size, segmap_kpm ? SM_FAULT : 0, S_READ); 2644 2645 snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP); 2646 snfi->snfi_len = (size_t)roundup(mapoff+chain_size, 2647 PAGESIZE)- (mapoff & PAGEMASK); 2648 /* 2649 * We must call segmap_fault() even for segmap_kpm 2650 * because that's how error gets returned. 2651 * (segmap_getmapflt() never fails but segmap_fault() 2652 * does.) 2653 * 2654 * If the pages aren't available yet, we get 2655 * DEADLK, so wait and try again a little later using 2656 * an increasing wait. We might be here a long time. 2657 * 2658 * If delay_sig returns EINTR, be sure to exit and 2659 * pass it up to the caller. 2660 */ 2661 deadlk_wait = 0; 2662 while ((error = FC_ERRNO(segmap_fault(kas.a_hat, 2663 segkmap, (caddr_t)(uintptr_t)(((uintptr_t)base + 2664 mapoff) & PAGEMASK), snfi->snfi_len, F_SOFTLOCK, 2665 S_READ))) == EDEADLK) { 2666 deadlk_wait += (deadlk_wait < 5) ? 1 : 4; 2667 if ((error = delay_sig(deadlk_wait)) != 0) { 2668 break; 2669 } 2670 } 2671 if (error != 0) { 2672 (void) segmap_release(segkmap, base, 0); 2673 kmem_free(snfi, sizeof (*snfi)); 2674 error = (error == EINTR) ? EINTR : EIO; 2675 goto out; 2676 } 2677 snfi->snfi_frtn.free_func = snf_smap_desbfree; 2678 snfi->snfi_frtn.free_arg = (caddr_t)snfi; 2679 snfi->snfi_base = base; 2680 snfi->snfi_mapoff = mapoff; 2681 mp = esballoca((uchar_t *)base + mapoff, chain_size, 2682 BPRI_HI, &snfi->snfi_frtn); 2683 2684 if (mp == NULL) { 2685 (void) segmap_fault(kas.a_hat, segkmap, 2686 (caddr_t)(uintptr_t)(((uintptr_t)base + 2687 mapoff) & PAGEMASK), snfi->snfi_len, 2688 F_SOFTUNLOCK, S_OTHER); 2689 (void) segmap_release(segkmap, base, 0); 2690 kmem_free(snfi, sizeof (*snfi)); 2691 freemsg(mp); 2692 error = EAGAIN; 2693 goto out; 2694 } 2695 VN_HOLD(fvp); 2696 snfi->snfi_vp = fvp; 2697 mp->b_wptr += chain_size; 2698 2699 /* Mark this dblk with the zero-copy flag */ 2700 mp->b_datap->db_struioflag |= STRUIO_ZC; 2701 fileoff += chain_size; 2702 total_size -= chain_size; 2703 } 2704 2705 if (total_size == 0 && !nowait) { 2706 ASSERT(!dowait); 2707 dowait = B_TRUE; 2708 mp->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 2709 } 2710 VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2711 error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp); 2712 if (error != 0) { 2713 /* 2714 * mp contains the mblks that were not sent by 2715 * socket_sendmblk. Use its size to update *count 2716 */ 2717 *count = ksize + (chain_size - msgdsize(mp)); 2718 if (mp != NULL) 2719 freemsg(mp); 2720 return (error); 2721 } 2722 ksize += chain_size; 2723 if (total_size == 0) 2724 goto done; 2725 2726 (void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2727 va.va_mask = AT_SIZE; 2728 error = VOP_GETATTR(fvp, &va, 0, kcred, NULL); 2729 if (error) 2730 break; 2731 /* Read as much as possible. */ 2732 if (fileoff >= va.va_size) 2733 break; 2734 if (total_size + fileoff > va.va_size) 2735 total_size = va.va_size - fileoff; 2736 } 2737 out: 2738 VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2739 done: 2740 *count = ksize; 2741 if (dowait) { 2742 stdata_t *stp; 2743 2744 stp = vp->v_stream; 2745 if (stp == NULL) { 2746 struct sonode *so; 2747 so = VTOSO(vp); 2748 error = so_zcopy_wait(so); 2749 } else { 2750 mutex_enter(&stp->sd_lock); 2751 while (!(stp->sd_flag & STZCNOTIFY)) { 2752 if (cv_wait_sig(&stp->sd_zcopy_wait, 2753 &stp->sd_lock) == 0) { 2754 error = EINTR; 2755 break; 2756 } 2757 } 2758 stp->sd_flag &= ~STZCNOTIFY; 2759 mutex_exit(&stp->sd_lock); 2760 } 2761 } 2762 return (error); 2763 } 2764 2765 int 2766 snf_cache(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size, 2767 uint_t maxpsz, ssize_t *count) 2768 { 2769 struct vnode *vp; 2770 mblk_t *mp; 2771 int iosize; 2772 int extra = 0; 2773 int error; 2774 short fflag; 2775 int ksize; 2776 int ioflag; 2777 struct uio auio; 2778 struct iovec aiov; 2779 struct vattr va; 2780 int maxblk = 0; 2781 int wroff = 0; 2782 struct sonode *so; 2783 struct nmsghdr msg; 2784 2785 vp = fp->f_vnode; 2786 if (vp->v_type == VSOCK) { 2787 stdata_t *stp; 2788 2789 /* 2790 * Get the extra space to insert a header and a trailer. 2791 */ 2792 so = VTOSO(vp); 2793 stp = vp->v_stream; 2794 if (stp == NULL) { 2795 wroff = so->so_proto_props.sopp_wroff; 2796 maxblk = so->so_proto_props.sopp_maxblk; 2797 extra = wroff + so->so_proto_props.sopp_tail; 2798 } else { 2799 wroff = (int)(stp->sd_wroff); 2800 maxblk = (int)(stp->sd_maxblk); 2801 extra = wroff + (int)(stp->sd_tail); 2802 } 2803 } 2804 bzero(&msg, sizeof (msg)); 2805 fflag = fp->f_flag; 2806 ksize = 0; 2807 auio.uio_iov = &aiov; 2808 auio.uio_iovcnt = 1; 2809 auio.uio_segflg = UIO_SYSSPACE; 2810 auio.uio_llimit = MAXOFFSET_T; 2811 auio.uio_fmode = fflag; 2812 auio.uio_extflg = UIO_COPY_CACHED; 2813 ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC); 2814 /* If read sync is not asked for, filter sync flags */ 2815 if ((ioflag & FRSYNC) == 0) 2816 ioflag &= ~(FSYNC|FDSYNC); 2817 for (;;) { 2818 if (ISSIG(curthread, JUSTLOOKING)) { 2819 error = EINTR; 2820 break; 2821 } 2822 iosize = (int)MIN(maxpsz, size); 2823 2824 /* 2825 * Socket filters can limit the mblk size, 2826 * so limit reads to maxblk if there are 2827 * filters present. 2828 */ 2829 if (vp->v_type == VSOCK && 2830 so->so_filter_active > 0 && maxblk != INFPSZ) 2831 iosize = (int)MIN(iosize, maxblk); 2832 2833 if (is_system_labeled()) { 2834 mp = allocb_cred(iosize + extra, CRED(), 2835 curproc->p_pid); 2836 } else { 2837 mp = allocb(iosize + extra, BPRI_MED); 2838 } 2839 if (mp == NULL) { 2840 error = EAGAIN; 2841 break; 2842 } 2843 2844 mp->b_rptr += wroff; 2845 2846 aiov.iov_base = (caddr_t)mp->b_rptr; 2847 aiov.iov_len = iosize; 2848 auio.uio_loffset = fileoff; 2849 auio.uio_resid = iosize; 2850 2851 error = VOP_READ(fvp, &auio, ioflag, fp->f_cred, NULL); 2852 iosize -= auio.uio_resid; 2853 2854 if (error == EINTR && iosize != 0) 2855 error = 0; 2856 2857 if (error != 0 || iosize == 0) { 2858 freeb(mp); 2859 break; 2860 } 2861 mp->b_wptr = mp->b_rptr + iosize; 2862 2863 VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2864 2865 error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp); 2866 2867 if (error != 0) { 2868 *count = ksize; 2869 if (mp != NULL) 2870 freeb(mp); 2871 return (error); 2872 } 2873 ksize += iosize; 2874 size -= iosize; 2875 if (size == 0) 2876 goto done; 2877 2878 fileoff += iosize; 2879 (void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2880 va.va_mask = AT_SIZE; 2881 error = VOP_GETATTR(fvp, &va, 0, kcred, NULL); 2882 if (error) 2883 break; 2884 /* Read as much as possible. */ 2885 if (fileoff >= va.va_size) 2886 size = 0; 2887 else if (size + fileoff > va.va_size) 2888 size = va.va_size - fileoff; 2889 } 2890 VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2891 done: 2892 *count = ksize; 2893 return (error); 2894 } 2895 2896 #if defined(_SYSCALL32_IMPL) || defined(_ILP32) 2897 /* 2898 * Largefile support for 32 bit applications only. 2899 */ 2900 int 2901 sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv, 2902 ssize32_t *count32) 2903 { 2904 ssize32_t sfv_len; 2905 u_offset_t sfv_off, va_size; 2906 struct vnode *vp, *fvp, *realvp; 2907 struct vattr va; 2908 stdata_t *stp; 2909 ssize_t count = 0; 2910 int error = 0; 2911 boolean_t dozcopy = B_FALSE; 2912 uint_t maxpsz; 2913 2914 sfv_len = (ssize32_t)sfv->sfv_len; 2915 if (sfv_len < 0) { 2916 error = EINVAL; 2917 goto out; 2918 } 2919 2920 if (sfv_len == 0) goto out; 2921 2922 sfv_off = (u_offset_t)sfv->sfv_off; 2923 2924 /* Same checks as in pread */ 2925 if (sfv_off > MAXOFFSET_T) { 2926 error = EINVAL; 2927 goto out; 2928 } 2929 if (sfv_off + sfv_len > MAXOFFSET_T) 2930 sfv_len = (ssize32_t)(MAXOFFSET_T - sfv_off); 2931 2932 /* 2933 * There are no more checks on sfv_len. So, we cast it to 2934 * u_offset_t and share the snf_direct_io/snf_cache code between 2935 * 32 bit and 64 bit. 2936 * 2937 * TODO: should do nbl_need_check() like read()? 2938 */ 2939 if (sfv_len > sendfile_max_size) { 2940 sf_stats.ss_file_not_cached++; 2941 error = snf_direct_io(fp, rfp, sfv_off, (u_offset_t)sfv_len, 2942 &count); 2943 goto out; 2944 } 2945 fvp = rfp->f_vnode; 2946 if (VOP_REALVP(fvp, &realvp, NULL) == 0) 2947 fvp = realvp; 2948 /* 2949 * Grab the lock as a reader to prevent the file size 2950 * from changing underneath. 2951 */ 2952 (void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2953 va.va_mask = AT_SIZE; 2954 error = VOP_GETATTR(fvp, &va, 0, kcred, NULL); 2955 va_size = va.va_size; 2956 if ((error != 0) || (va_size == 0) || (sfv_off >= va_size)) { 2957 VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL); 2958 goto out; 2959 } 2960 /* Read as much as possible. */ 2961 if (sfv_off + sfv_len > va_size) 2962 sfv_len = va_size - sfv_off; 2963 2964 vp = fp->f_vnode; 2965 stp = vp->v_stream; 2966 /* 2967 * When the NOWAIT flag is not set, we enable zero-copy only if the 2968 * transfer size is large enough. This prevents performance loss 2969 * when the caller sends the file piece by piece. 2970 */ 2971 if (sfv_len >= MAXBSIZE && (sfv_len >= (va_size >> 1) || 2972 (sfv->sfv_flag & SFV_NOWAIT) || sfv_len >= 0x1000000) && 2973 !vn_has_flocks(fvp) && !(fvp->v_flag & VNOMAP)) { 2974 uint_t copyflag; 2975 copyflag = stp != NULL ? stp->sd_copyflag : 2976 VTOSO(vp)->so_proto_props.sopp_zcopyflag; 2977 if ((copyflag & (STZCVMSAFE|STZCVMUNSAFE)) == 0) { 2978 int on = 1; 2979 2980 if (socket_setsockopt(VTOSO(vp), SOL_SOCKET, 2981 SO_SND_COPYAVOID, &on, sizeof (on), CRED()) == 0) 2982 dozcopy = B_TRUE; 2983 } else { 2984 dozcopy = copyflag & STZCVMSAFE; 2985 } 2986 } 2987 if (dozcopy) { 2988 sf_stats.ss_file_segmap++; 2989 error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len, 2990 &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0)); 2991 } else { 2992 if (vp->v_type == VSOCK && stp == NULL) { 2993 sonode_t *so = VTOSO(vp); 2994 maxpsz = so->so_proto_props.sopp_maxpsz; 2995 } else if (stp != NULL) { 2996 maxpsz = stp->sd_qn_maxpsz; 2997 } else { 2998 maxpsz = maxphys; 2999 } 3000 3001 if (maxpsz == INFPSZ) 3002 maxpsz = maxphys; 3003 else 3004 maxpsz = roundup(maxpsz, MAXBSIZE); 3005 sf_stats.ss_file_cached++; 3006 error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len, 3007 maxpsz, &count); 3008 } 3009 out: 3010 releasef(sfv->sfv_fd); 3011 *count32 = (ssize32_t)count; 3012 return (error); 3013 } 3014 #endif 3015 3016 #ifdef _SYSCALL32_IMPL 3017 /* 3018 * recv32(), recvfrom32(), send32(), sendto32(): intentionally return a 3019 * ssize_t rather than ssize32_t; see the comments above read32 for details. 3020 */ 3021 3022 ssize_t 3023 recv32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags) 3024 { 3025 return (recv(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags)); 3026 } 3027 3028 ssize_t 3029 recvfrom32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags, 3030 caddr32_t name, caddr32_t namelenp) 3031 { 3032 return (recvfrom(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags, 3033 (void *)(uintptr_t)name, (void *)(uintptr_t)namelenp)); 3034 } 3035 3036 ssize_t 3037 send32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags) 3038 { 3039 return (send(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags)); 3040 } 3041 3042 ssize_t 3043 sendto32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags, 3044 caddr32_t name, socklen_t namelen) 3045 { 3046 return (sendto(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags, 3047 (void *)(uintptr_t)name, namelen)); 3048 } 3049 #endif /* _SYSCALL32_IMPL */ 3050 3051 /* 3052 * Function wrappers (mostly around the sonode switch) for 3053 * backward compatibility. 3054 */ 3055 3056 int 3057 soaccept(struct sonode *so, int fflag, struct sonode **nsop) 3058 { 3059 return (socket_accept(so, fflag, CRED(), nsop)); 3060 } 3061 3062 int 3063 sobind(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3064 int backlog, int flags) 3065 { 3066 int error; 3067 3068 error = socket_bind(so, name, namelen, flags, CRED()); 3069 if (error == 0 && backlog != 0) 3070 return (socket_listen(so, backlog, CRED())); 3071 3072 return (error); 3073 } 3074 3075 int 3076 solisten(struct sonode *so, int backlog) 3077 { 3078 return (socket_listen(so, backlog, CRED())); 3079 } 3080 3081 int 3082 soconnect(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3083 int fflag, int flags) 3084 { 3085 return (socket_connect(so, name, namelen, fflag, flags, CRED())); 3086 } 3087 3088 int 3089 sorecvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 3090 { 3091 return (socket_recvmsg(so, msg, uiop, CRED())); 3092 } 3093 3094 int 3095 sosendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 3096 { 3097 return (socket_sendmsg(so, msg, uiop, CRED())); 3098 } 3099 3100 int 3101 soshutdown(struct sonode *so, int how) 3102 { 3103 return (socket_shutdown(so, how, CRED())); 3104 } 3105 3106 int 3107 sogetsockopt(struct sonode *so, int level, int option_name, void *optval, 3108 socklen_t *optlenp, int flags) 3109 { 3110 return (socket_getsockopt(so, level, option_name, optval, optlenp, 3111 flags, CRED())); 3112 } 3113 3114 int 3115 sosetsockopt(struct sonode *so, int level, int option_name, const void *optval, 3116 t_uscalar_t optlen) 3117 { 3118 return (socket_setsockopt(so, level, option_name, optval, optlen, 3119 CRED())); 3120 } 3121 3122 /* 3123 * Because this is backward compatibility interface it only needs to be 3124 * able to handle the creation of TPI sockfs sockets. 3125 */ 3126 struct sonode * 3127 socreate(struct sockparams *sp, int family, int type, int protocol, int version, 3128 int *errorp) 3129 { 3130 struct sonode *so; 3131 3132 ASSERT(sp != NULL); 3133 3134 so = sp->sp_smod_info->smod_sock_create_func(sp, family, type, protocol, 3135 version, SOCKET_SLEEP, errorp, CRED()); 3136 if (so == NULL) { 3137 SOCKPARAMS_DEC_REF(sp); 3138 } else { 3139 if ((*errorp = SOP_INIT(so, NULL, CRED(), SOCKET_SLEEP)) == 0) { 3140 /* Cannot fail, only bumps so_count */ 3141 (void) VOP_OPEN(&SOTOV(so), FREAD|FWRITE, CRED(), NULL); 3142 } else { 3143 socket_destroy(so); 3144 so = NULL; 3145 } 3146 } 3147 return (so); 3148 } 3149