/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static void	sodiscard(struct socket *so);
static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");


static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
        struct socket *so;
        unsigned waitmask;

        waitmask = waitok ? M_WAITOK : M_NOWAIT;
        so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
        if (so) {
                /* XXX race condition for reentrant kernel */
                so->so_proto = pr;
                TAILQ_INIT(&so->so_aiojobq);
                TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
                TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
                lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
                lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
                spin_init(&so->so_rcvd_spin);
                netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
                    MSGF_DROPABLE, so->so_proto->pr_usrreqs->pru_rcvd);
                so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
                so->so_state = SS_NOFDREF;
                so->so_refs = 1;
        }
        return so;
}

int
socreate(int dom, struct socket **aso, int type,
    int proto, struct thread *td)
{
        struct proc *p = td->td_proc;
        struct protosw *prp;
        struct socket *so;
        struct pru_attach_info ai;
        int error;

        if (proto)
                prp = pffindproto(dom, proto, type);
        else
                prp = pffindtype(dom, type);

        if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
                return (EPROTONOSUPPORT);

        if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
            prp->pr_domain->dom_family != PF_LOCAL &&
            prp->pr_domain->dom_family != PF_INET &&
            prp->pr_domain->dom_family != PF_INET6 &&
            prp->pr_domain->dom_family != PF_ROUTE) {
                return (EPROTONOSUPPORT);
        }

        if (prp->pr_type != type)
                return (EPROTOTYPE);
        so = soalloc(p != NULL, prp);
        if (so == NULL)
                return (ENOBUFS);

        /*
         * Callers of socreate() presumably will connect up a descriptor
         * and call soclose() if they cannot.  This represents our so_refs
         * (which should be 1) from soalloc().
         */
        soclrstate(so, SS_NOFDREF);

        /*
         * Set a default port for protocol processing.  No action will occur
         * on the socket on this port until an inpcb is attached to it and
         * is able to match incoming packets, or until the socket becomes
         * available to userland.
         *
         * We normally default the socket to the protocol thread on cpu 0.
         * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
         * thread and all pr_*()/pru_*() calls are executed synchronously.
         */
        if (prp->pr_flags & PR_SYNC_PORT)
                so->so_port = &netisr_sync_port;
        else
                so->so_port = netisr_cpuport(0);

        TAILQ_INIT(&so->so_incomp);
        TAILQ_INIT(&so->so_comp);
        so->so_type = type;
        so->so_cred = crhold(p->p_ucred);
        ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
        ai.p_ucred = p->p_ucred;
        ai.fd_rdir = p->p_fd->fd_rdir;

        /*
         * Auto-sizing of socket buffers is managed by the protocols and
         * the appropriate flags must be set in the pru_attach function.
         */
        error = so_pru_attach(so, proto, &ai);
        if (error) {
                sosetstate(so, SS_NOFDREF);
                sofree(so);     /* from soalloc */
                return error;
        }

        /*
         * NOTE: Returns referenced socket.
         */
        *aso = so;
        return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        int error;

        error = so_pru_bind(so, nam, td);
        return (error);
}

static void
sodealloc(struct socket *so)
{
        if (so->so_rcv.ssb_hiwat)
                (void)chgsbsize(so->so_cred->cr_uidinfo,
                    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
        if (so->so_snd.ssb_hiwat)
                (void)chgsbsize(so->so_cred->cr_uidinfo,
                    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
        /* remove accept filter if present */
        if (so->so_accf != NULL)
                do_setopt_accept_filter(so, NULL);
#endif /* INET */
        crfree(so->so_cred);
        if (so->so_faddr != NULL)
                kfree(so->so_faddr, M_SONAME);
        kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
        int error;
#ifdef SCTP
        short oldopt, oldqlimit;
#endif /* SCTP */

        if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
                return (EINVAL);

#ifdef SCTP
        oldopt = so->so_options;
        oldqlimit = so->so_qlimit;
#endif /* SCTP */

        lwkt_gettoken(&so->so_rcv.ssb_token);
        if (TAILQ_EMPTY(&so->so_comp))
                so->so_options |= SO_ACCEPTCONN;
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (backlog < 0 || backlog > somaxconn)
                backlog = somaxconn;
        so->so_qlimit = backlog;
        /*
         * SCTP needs to look at and tweak both the inbound backlog
         * parameter AND the so_options (UDP model both connect's and
         * gets inbound connections .. implicitly).
         */
        error = so_pru_listen(so, td);
        if (error) {
#ifdef SCTP
                /* Restore the params */
                so->so_options = oldopt;
                so->so_qlimit = oldqlimit;
#endif /* SCTP */
                return (error);
        }
        return (0);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *      so_pcb -        The protocol stack still has a reference
 *      SS_NOFDREF -    There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
        struct socket *head;

        /*
         * This is a bit hackish at the moment.  We need to interlock
         * any accept queue we are on before we potentially lose the
         * last reference to avoid races against a re-reference from
         * someone operating on the queue.
         */
        while ((head = so->so_head) != NULL) {
                lwkt_getpooltoken(head);
                if (so->so_head == head)
                        break;
                lwkt_relpooltoken(head);
        }

        /*
         * Arbitrage the last free.
         */
        KKASSERT(so->so_refs > 0);
        if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
                if (head)
                        lwkt_relpooltoken(head);
                return;
        }

        KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
        KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

        /*
         * We're done, remove ourselves from the accept queue we are
         * on, if we are on one.
         */
        if (head != NULL) {
                if (so->so_state & SS_INCOMP) {
                        TAILQ_REMOVE(&head->so_incomp, so, so_list);
                        head->so_incqlen--;
                } else if (so->so_state & SS_COMP) {
                        /*
                         * We must not decommission a socket that's
                         * on the accept(2) queue.  If we do, then
                         * accept(2) may hang after select(2) indicated
                         * that the listening socket was ready.
                         */
                        lwkt_relpooltoken(head);
                        return;
                } else {
                        panic("sofree: not queued");
                }
                soclrstate(so, SS_INCOMP);
                so->so_head = NULL;
                lwkt_relpooltoken(head);
        }
        ssb_release(&so->so_snd, so);
        sorflush(so);
        sodealloc(so);
}
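/*
 * Example (editor's sketch, compiled out): the so_refs protocol that
 * sofree() arbitrates above.  A caller that must keep a socket alive
 * across a blocking window takes a reference first and releases it
 * with sofree(); only the last release tears the socket down.  The
 * helper name here is hypothetical, added for illustration only.
 */
#if 0
static void
example_socket_hold(struct socket *so)
{
        soreference(so);        /* so_refs: n -> n + 1 */
        /* ... window where so must remain valid ... */
        sofree(so);             /* drop the ref; frees on last release */
}
#endif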
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
        int error;

        funsetown(&so->so_sigio);
        if (!use_soclose_fast ||
            (so->so_proto->pr_flags & PR_SYNC_PORT) ||
            (so->so_options & SO_LINGER)) {
                error = soclose_sync(so, fflag);
        } else {
                soclose_fast(so);
                error = 0;
        }
        return error;
}

static void
sodiscard(struct socket *so)
{
        lwkt_getpooltoken(so);
        if (so->so_options & SO_ACCEPTCONN) {
                struct socket *sp;

                while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
                        TAILQ_REMOVE(&so->so_incomp, sp, so_list);
                        soclrstate(sp, SS_INCOMP);
                        sp->so_head = NULL;
                        so->so_incqlen--;
                        soaborta(sp);
                }
                while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
                        TAILQ_REMOVE(&so->so_comp, sp, so_list);
                        soclrstate(sp, SS_COMP);
                        sp->so_head = NULL;
                        so->so_qlen--;
                        soaborta(sp);
                }
        }
        lwkt_relpooltoken(so);

        if (so->so_state & SS_NOFDREF)
                panic("soclose: NOFDREF");
        sosetstate(so, SS_NOFDREF);     /* take ref */
}

static int
soclose_sync(struct socket *so, int fflag)
{
        int error = 0;

        if (so->so_pcb == NULL)
                goto discard;
        if (so->so_state & SS_ISCONNECTED) {
                if ((so->so_state & SS_ISDISCONNECTING) == 0) {
                        error = sodisconnect(so);
                        if (error)
                                goto drop;
                }
                if (so->so_options & SO_LINGER) {
                        if ((so->so_state & SS_ISDISCONNECTING) &&
                            (fflag & FNONBLOCK))
                                goto drop;
                        while (so->so_state & SS_ISCONNECTED) {
                                error = tsleep(&so->so_timeo, PCATCH,
                                    "soclos", so->so_linger * hz);
                                if (error)
                                        break;
                        }
                }
        }
drop:
        if (so->so_pcb) {
                int error2;

                error2 = so_pru_detach(so);
                if (error == 0)
                        error = error2;
        }
discard:
        sodiscard(so);
        so_pru_sync(so);        /* unpend async sending */
        sofree(so);             /* dispose of ref */

        return (error);
}

static void
soclose_sofree_async_handler(netmsg_t msg)
{
        sofree(msg->base.nm_so);
}

static void
soclose_sofree_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_sofree_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_disconn_async_handler(netmsg_t msg)
{
        struct socket *so = msg->base.nm_so;

        if ((so->so_state & SS_ISCONNECTED) &&
            (so->so_state & SS_ISDISCONNECTING) == 0)
                so_pru_disconnect_direct(so);

        if (so->so_pcb)
                so_pru_detach_direct(so);

        sodiscard(so);
        sofree(so);
}

static void
soclose_disconn_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_disconn_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_detach_async_handler(netmsg_t msg)
{
        struct socket *so = msg->base.nm_so;

        if (so->so_pcb)
                so_pru_detach_direct(so);

        sodiscard(so);
        sofree(so);
}

static void
soclose_detach_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_detach_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_fast(struct socket *so)
{
        if (so->so_pcb == NULL)
                goto discard;

        if ((so->so_state & SS_ISCONNECTED) &&
            (so->so_state & SS_ISDISCONNECTING) == 0) {
                soclose_disconn_async(so);
                return;
        }

        if (so->so_pcb) {
                soclose_detach_async(so);
                return;
        }

discard:
        sodiscard(so);
        soclose_sofree_async(so);
}
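/*
 * Example (editor's sketch, compiled out): the dispatch pattern shared
 * by the three soclose_*_async() helpers above.  The netmsg embedded in
 * the socket is initialized with a handler and sent to the socket's
 * protocol thread, which runs the handler asynchronously.  The handler
 * typedef name (netisr_fn_t) is an assumption for illustration.
 */
#if 0
static void
example_dispatch_to_proto_thread(struct socket *so, netisr_fn_t handler)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0, handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}
#endif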
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort(struct socket *so)
{
        soreference(so);
        so_pru_abort(so);
}

void
soaborta(struct socket *so)
{
        soreference(so);
        so_pru_aborta(so);
}

void
soabort_oncpu(struct socket *so)
{
        soreference(so);
        so_pru_abort_oncpu(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
        if ((so->so_state & SS_NOFDREF) == 0)
                panic("soaccept: !NOFDREF");
        soclrstate(so, SS_NOFDREF);     /* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
        int error;

        soaccept_generic(so);
        error = so_pru_accept(so, nam);
        return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        int error;

        if (so->so_options & SO_ACCEPTCONN)
                return (EOPNOTSUPP);
        /*
         * If protocol is connection-based, can only connect once.
         * Otherwise, if connected, try to disconnect first.
         * This allows user to disconnect by connecting to, e.g.,
         * a null address.
         */
        if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
            ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
             (error = sodisconnect(so)))) {
                error = EISCONN;
        } else {
                /*
                 * Prevent accumulated error from previous connection
                 * from biting us.
                 */
                so->so_error = 0;
                error = so_pru_connect(so, nam, td);
        }
        return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
        int error;

        error = so_pru_connect2(so1, so2);
        return (error);
}

int
sodisconnect(struct socket *so)
{
        int error;

        if ((so->so_state & SS_ISCONNECTED) == 0) {
                error = ENOTCONN;
                goto bad;
        }
        if (so->so_state & SS_ISDISCONNECTING) {
                error = EALREADY;
                goto bad;
        }
        error = so_pru_disconnect(so);
bad:
        return (error);
}
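/*
 * Example (editor's sketch, compiled out): because soconnect() above
 * disconnects a non-connection-based socket before reconnecting, a
 * datagram socket can be re-targeted by simply calling soconnect()
 * again; no explicit sodisconnect() is needed.  Helper name is
 * hypothetical.
 */
#if 0
static int
example_retarget_datagram(struct socket *so, struct sockaddr *new_peer,
    struct thread *td)
{
        /* implicit disconnect happens inside soconnect() */
        return (soconnect(so, new_peer, td));
}
#endif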
#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
        struct mbuf **mp;
        struct mbuf *m;
        size_t resid;
        int space, len;
        int clen = 0, error, dontroute, mlen;
        int atomic = sosendallatonce(so) || top;
        int pru_flags;

        if (uio) {
                resid = uio->uio_resid;
        } else {
                resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
                len = 0;
                for (m = top; m; m = m->m_next)
                        len += m->m_len;
                KKASSERT(top->m_pkthdr.len == len);
#endif
        }

        /*
         * WARNING!  resid is unsigned, space and len are signed.  space
         *           can wind up negative if the sockbuf is overcommitted.
         *
         * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
         * type sockets since that's an error.
         */
        if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
                error = EINVAL;
                goto out;
        }

        dontroute =
            (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
            (so->so_proto->pr_flags & PR_ATOMIC);
        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;
        if (control)
                clen = control->m_len;
#define gotoerr(errcode)        { error = errcode; goto release; }

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        do {
                if (so->so_state & SS_CANTSENDMORE)
                        gotoerr(EPIPE);
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        goto release;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        /*
                         * `sendto' and `sendmsg' are allowed on a connection-
                         * based socket if it supports implied connect.
                         * Return ENOTCONN if not connected and no address is
                         * supplied.
                         */
                        if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
                            (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
                                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                                    !(resid == 0 && clen != 0))
                                        gotoerr(ENOTCONN);
                        } else if (addr == NULL)
                                gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
                                    ENOTCONN : EDESTADDRREQ);
                }
                if ((atomic && resid > so->so_snd.ssb_hiwat) ||
                    clen > so->so_snd.ssb_hiwat) {
                        gotoerr(EMSGSIZE);
                }
                space = ssb_space(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if ((space < 0 || (size_t)space < resid + clen) && uio &&
                    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
                        if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                                gotoerr(EWOULDBLOCK);
                        ssb_unlock(&so->so_snd);
                        error = ssb_wait(&so->so_snd);
                        if (error)
                                goto out;
                        goto restart;
                }
                mp = &top;
                space -= clen;
                do {
                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                                if (flags & MSG_EOR)
                                        top->m_flags |= M_EOR;
                        } else do {
                                if (resid > INT_MAX)
                                        resid = INT_MAX;
                                m = m_getl((int)resid, MB_WAIT, MT_DATA,
                                    top == NULL ? M_PKTHDR : 0, &mlen);
                                if (top == NULL) {
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = NULL;
                                }
                                len = imin((int)szmin(mlen, resid), space);
                                if (resid < MINCLSIZE) {
                                        /*
                                         * For datagram protocols, leave room
                                         * for protocol headers in first mbuf.
                                         */
                                        if (atomic && top == NULL && len < mlen)
                                                MH_ALIGN(m, len);
                                }
                                space -= len;
                                error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid == 0) {
                                        if (flags & MSG_EOR)
                                                top->m_flags |= M_EOR;
                                        break;
                                }
                        } while (space > 0 && atomic);
                        if (dontroute)
                                so->so_options |= SO_DONTROUTE;
                        if (flags & MSG_OOB) {
                                pru_flags = PRUS_OOB;
                        } else if ((flags & MSG_EOF) &&
                            (so->so_proto->pr_flags & PR_IMPLOPCL) &&
                            (resid == 0)) {
                                /*
                                 * If the user set MSG_EOF, the protocol
                                 * understands this flag and nothing left to
                                 * send then use PRU_SEND_EOF instead of PRU_SEND.
                                 */
                                pru_flags = PRUS_EOF;
                        } else if (resid > 0 && space > 0) {
                                /* If there is more to send, set PRUS_MORETOCOME */
                                pru_flags = PRUS_MORETOCOME;
                        } else {
                                pru_flags = 0;
                        }
                        /*
                         * XXX all the SS_CANTSENDMORE checks previously
                         * done could be out of date.  We could have received
                         * a reset packet in an interrupt or maybe we slept
                         * while doing page faults in uiomove() etc.  We could
                         * probably recheck again inside the splnet() protection
                         * here, but there are probably other places that this
                         * also happens.  We must rethink this.
                         */
                        error = so_pru_send(so, pru_flags, top, addr, control, td);
                        if (dontroute)
                                so->so_options &= ~SO_DONTROUTE;
                        clen = 0;
                        control = NULL;
                        top = NULL;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
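/*
 * Usage sketch (editor's addition, compiled out): sending a prepackaged
 * mbuf chain through sosend() above.  With top != NULL the uio argument
 * must be NULL and top->m_pkthdr.len must match the chain, per the
 * INVARIANTS check at the top of sosend().  Helper name is hypothetical.
 */
#if 0
static int
example_send_chain(struct socket *so, struct mbuf *top, struct thread *td)
{
        /* connected socket: no address, no control data, no flags */
        return (sosend(so, NULL, NULL, top, NULL, 0, td));
}
#endif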
#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *      sosendallatonce() returns true,
 *      the "atomic" variable is true,
 *      and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *      PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
        size_t resid;
        int error, pru_flags = 0;
        int space;

        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;
        if (control)
                m_freem(control);

        KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
        resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        if (so->so_state & SS_CANTSENDMORE)
                gotoerr(EPIPE);
        if (so->so_error) {
                error = so->so_error;
                so->so_error = 0;
                goto release;
        }
        if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
                gotoerr(EDESTADDRREQ);
        if (resid > so->so_snd.ssb_hiwat)
                gotoerr(EMSGSIZE);
        space = ssb_space(&so->so_snd);
        if (uio && (space < 0 || (size_t)space < resid)) {
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                        gotoerr(EWOULDBLOCK);
                ssb_unlock(&so->so_snd);
                error = ssb_wait(&so->so_snd);
                if (error)
                        goto out;
                goto restart;
        }

        if (uio) {
                int hdrlen = max_hdr;

                /*
                 * We try to optimize out the additional mbuf
                 * allocations in M_PREPEND() on output path, e.g.
                 * - udp_output(), when it tries to prepend protocol
                 *   headers.
                 * - Link layer output function, when it tries to
                 *   prepend link layer header.
                 *
                 * This probably will not benefit any data that will
                 * be fragmented, so this optimization is only performed
                 * when the size of data and max size of protocol+link
                 * headers fit into one mbuf cluster.
                 */
                if (uio->uio_resid > MCLBYTES - hdrlen ||
                    !udp_sosend_prepend) {
                        top = m_uiomove(uio);
                        if (top == NULL)
                                goto release;
                } else {
                        int nsize;

                        top = m_getl(uio->uio_resid + hdrlen, MB_WAIT,
                            MT_DATA, M_PKTHDR, &nsize);
                        KASSERT(nsize >= uio->uio_resid + hdrlen,
                            ("sosendudp invalid nsize %d, "
                             "resid %zu, hdrlen %d",
                             nsize, uio->uio_resid, hdrlen));

                        top->m_len = uio->uio_resid;
                        top->m_pkthdr.len = uio->uio_resid;
                        top->m_data += hdrlen;

                        error = uiomove(mtod(top, caddr_t), top->m_len, uio);
                        if (error)
                                goto out;
                }
        }

        if (flags & MSG_DONTROUTE)
                pru_flags |= PRUS_DONTROUTE;

        if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
                so_pru_send_async(so, pru_flags, top, addr, NULL, td);
                error = 0;
        } else {
                error = so_pru_send(so, pru_flags, top, addr, NULL, td);
        }
        top = NULL;     /* sent or freed in lower layer */

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        return (error);
}
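/*
 * Editor's sketch (compiled out) of the prepend-avoidance test used by
 * sosendudp() above: the single-mbuf fast path is only taken when the
 * payload plus worst-case protocol and link headers (max_hdr) fits in
 * one mbuf cluster, since fragmented data would not benefit.
 */
#if 0
static int
example_fits_one_cluster(size_t payload)
{
        return (payload <= MCLBYTES - max_hdr);
}
#endif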
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
        struct mbuf **mp;
        struct mbuf *m;
        size_t resid;
        int space, len;
        int error, mlen;
        int allatonce;
        int pru_flags;

        if (uio) {
                KKASSERT(top == NULL);
                allatonce = 0;
                resid = uio->uio_resid;
        } else {
                allatonce = 1;
                resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
                len = 0;
                for (m = top; m; m = m->m_next)
                        len += m->m_len;
                KKASSERT(top->m_pkthdr.len == len);
#endif
        }

        /*
         * WARNING!  resid is unsigned, space and len are signed.  space
         *           can wind up negative if the sockbuf is overcommitted.
         *
         * Also check to make sure that MSG_EOR isn't used on TCP
         */
        if (flags & MSG_EOR) {
                error = EINVAL;
                goto out;
        }

        if (control) {
                /* TCP doesn't do control messages (rights, creds, etc) */
                if (control->m_len) {
                        error = EINVAL;
                        goto out;
                }
                m_freem(control);       /* empty control, just free it */
                control = NULL;
        }

        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;

#define gotoerr(errcode)        { error = errcode; goto release; }

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        do {
                if (so->so_state & SS_CANTSENDMORE)
                        gotoerr(EPIPE);
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        goto release;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0 &&
                    (so->so_state & SS_ISCONFIRMING) == 0)
                        gotoerr(ENOTCONN);
                if (allatonce && resid > so->so_snd.ssb_hiwat)
                        gotoerr(EMSGSIZE);

                space = ssb_space_prealloc(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if ((space < 0 || (size_t)space < resid) && !allatonce &&
                    space < so->so_snd.ssb_lowat) {
                        if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                                gotoerr(EWOULDBLOCK);
                        ssb_unlock(&so->so_snd);
                        error = ssb_wait(&so->so_snd);
                        if (error)
                                goto out;
                        goto restart;
                }
                mp = &top;
                do {
                        int cnt = 0, async = 0;

                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                        } else do {
                                if (resid > INT_MAX)
                                        resid = INT_MAX;
                                m = m_getl((int)resid, MB_WAIT, MT_DATA,
                                    top == NULL ? M_PKTHDR : 0, &mlen);
                                if (top == NULL) {
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = NULL;
                                }
                                len = imin((int)szmin(mlen, resid), space);
                                space -= len;
                                error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid == 0)
                                        break;
                                ++cnt;
                        } while (space > 0 && cnt < tcp_sosend_agglim);

                        if (tcp_sosend_async)
                                async = 1;

                        if (flags & MSG_OOB) {
                                pru_flags = PRUS_OOB;
                                async = 0;
                        } else if ((flags & MSG_EOF) && resid == 0) {
                                pru_flags = PRUS_EOF;
                        } else if (resid > 0 && space > 0) {
                                /* If there is more to send, set PRUS_MORETOCOME */
                                pru_flags = PRUS_MORETOCOME;
                                async = 1;
                        } else {
                                pru_flags = 0;
                        }

                        if (flags & MSG_SYNC)
                                async = 0;

                        /*
                         * XXX all the SS_CANTSENDMORE checks previously
                         * done could be out of date.  We could have received
                         * a reset packet in an interrupt or maybe we slept
                         * while doing page faults in uiomove() etc.  We could
                         * probably recheck again inside the splnet() protection
                         * here, but there are probably other places that this
                         * also happens.  We must rethink this.
                         */
                        for (m = top; m; m = m->m_next)
                                ssb_preallocstream(&so->so_snd, m);
                        if (!async) {
                                error = so_pru_send(so, pru_flags, top,
                                    NULL, NULL, td);
                        } else {
                                so_pru_send_async(so, pru_flags, top,
                                    NULL, NULL, td);
                                error = 0;
                        }

                        top = NULL;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
#endif
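/*
 * Editor's sketch (compiled out): a caller that needs the protocol's
 * synchronous error return can defeat the tcp_sosend_async path in
 * sosendtcp() above by passing MSG_SYNC, forcing so_pru_send() rather
 * than so_pru_send_async().  Helper name is hypothetical.
 */
#if 0
static int
example_send_tcp_sync(struct socket *so, struct uio *uio, struct thread *td)
{
        return (sosendtcp(so, NULL, uio, NULL, NULL, MSG_SYNC, td));
}
#endif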
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
        struct mbuf *m, *n;
        struct mbuf *free_chain = NULL;
        int flags, len, error, offset;
        struct protosw *pr = so->so_proto;
        int moff, type = 0;
        size_t resid, orig_resid;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = (size_t)(sio->sb_climit - sio->sb_cc);
        orig_resid = resid;

        if (psa)
                *psa = NULL;
        if (controlp)
                *controlp = NULL;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(MB_WAIT, MT_DATA);
                if (m == NULL)
                        return (ENOBUFS);
                error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
                if (error)
                        goto bad;
                if (sio) {
                        do {
                                sbappend(sio, m);
                                KKASSERT(resid >= (size_t)m->m_len);
                                resid -= (size_t)m->m_len;
                        } while (resid > 0 && m);
                } else {
                        do {
                                uio->uio_resid = resid;
                                error = uiomove(mtod(m, caddr_t),
                                    (int)szmin(resid, m->m_len),
                                    uio);
                                resid = uio->uio_resid;
                                m = m_free(m);
                        } while (uio->uio_resid && error == 0 && m);
                }
bad:
                if (m)
                        m_freem(m);
                return (error);
        }
        if ((so->so_state & SS_ISCONFIRMING) && resid)
                so_pru_rcvd(so, 0);

        /*
         * The token interlocks against the protocol thread while
         * ssb_lock is a blocking lock against other userland entities.
         */
        lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
        error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
        if (error)
                goto done;

        m = so->so_rcv.ssb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *      receive operation at once if we block (resid <= hiwat).
         *   3. MSG_DONTWAIT is not set
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
            (size_t)so->so_rcv.ssb_cc < resid) &&
            (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
            ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
            m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
                KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                for (; m; m = m->m_next) {
                        if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                                m = so->so_rcv.ssb_mb;
                                goto dontblock;
                        }
                }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (pr->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (resid == 0)
                        goto release;
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                ssb_unlock(&so->so_rcv);
                error = ssb_wait(&so->so_rcv);
                if (error)
                        goto done;
                goto restart;
        }
dontblock:
        if (uio && uio->uio_td && uio->uio_td->td_proc)
                uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

        /*
         * note: m should be == sb_mb here.  Cache the next record while
         * cleaning up.  Note that calling m_free*() will break out critical
         * section.
         */
        KKASSERT(m == so->so_rcv.ssb_mb);

        /*
         * Skip any address mbufs prepending the record.
         */
        if (pr->pr_flags & PR_ADDR) {
                KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
                orig_resid = 0;
                if (psa)
                        *psa = dup_sockaddr(mtod(m, struct sockaddr *));
                if (flags & MSG_PEEK)
                        m = m->m_next;
                else
                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
        }

        /*
         * Skip any control mbufs prepending the record.
         */
#ifdef SCTP
        if (pr->pr_flags & PR_ADDR_OPT) {
                /*
                 * For SCTP we may be getting a
                 * whole message OR a partial delivery.
                 */
                if (m && m->m_type == MT_SONAME) {
                        orig_resid = 0;
                        if (psa)
                                *psa = dup_sockaddr(mtod(m, struct sockaddr *));
                        if (flags & MSG_PEEK)
                                m = m->m_next;
                        else
                                m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                }
        }
#endif /* SCTP */
        while (m && m->m_type == MT_CONTROL && error == 0) {
                if (flags & MSG_PEEK) {
                        if (controlp)
                                *controlp = m_copy(m, 0, m->m_len);
                        m = m->m_next;  /* XXX race */
                } else {
                        if (controlp) {
                                n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                if (pr->pr_domain->dom_externalize &&
                                    mtod(m, struct cmsghdr *)->cmsg_type ==
                                    SCM_RIGHTS)
                                        error = (*pr->pr_domain->dom_externalize)(m);
                                *controlp = m;
                                m = n;
                        } else {
                                m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                        }
                }
                if (controlp && *controlp) {
                        orig_resid = 0;
                        controlp = &(*controlp)->m_next;
                }
        }

        /*
         * flag OOB data.
         */
        if (m) {
                type = m->m_type;
                if (type == MT_OOBDATA)
                        flags |= MSG_OOB;
        }

        /*
         * Copy to the UIO or mbuf return chain (*mp).
         */
        moff = 0;
        offset = 0;
        while (m && resid > 0 && error == 0) {
                if (m->m_type == MT_OOBDATA) {
                        if (type != MT_OOBDATA)
                                break;
                } else if (type == MT_OOBDATA)
                        break;
                else
                        KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                            ("receive 3"));
                soclrstate(so, SS_RCVATMARK);
                len = (resid > INT_MAX) ? INT_MAX : resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;

                /*
                 * Copy out to the UIO or pass the mbufs back to the SIO.
                 * The SIO is dealt with when we eat the mbuf, but deal
                 * with the resid here either way.
                 */
                if (uio) {
                        uio->uio_resid = resid;
                        error = uiomove(mtod(m, caddr_t) + moff, len, uio);
                        resid = uio->uio_resid;
                        if (error)
                                goto release;
                } else {
                        resid -= (size_t)len;
                }

                /*
                 * Eat the entire mbuf or just a piece of it
                 */
                if (len == m->m_len - moff) {
                        if (m->m_flags & M_EOR)
                                flags |= MSG_EOR;
#ifdef SCTP
                        if (m->m_flags & M_NOTIFICATION)
                                flags |= MSG_NOTIFICATION;
#endif /* SCTP */
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                if (sio) {
                                        n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                        sbappend(sio, m);
                                        m = n;
                                } else {
                                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                                }
                        }
                } else {
                        if (flags & MSG_PEEK) {
                                moff += len;
                        } else {
                                if (sio) {
                                        n = m_copym(m, 0, len, MB_WAIT);
                                        if (n)
                                                sbappend(sio, n);
                                }
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.ssb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        sosetstate(so, SS_RCVATMARK);
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                if (flags & MSG_EOR)
                        break;
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until resid == 0 or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep signalsockbuf locked against other readers.
                 */
                while ((flags & MSG_WAITALL) && m == NULL &&
                    resid > 0 && !sosendallatonce(so) &&
                    so->so_rcv.ssb_mb == NULL) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        /*
                         * The window might have closed to zero, make
                         * sure we send an ack now that we've drained
                         * the buffer or we might end up blocking until
                         * the idle takes over (5 seconds).
                         */
                        if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
                                so_pru_rcvd(so, flags);
                        error = ssb_wait(&so->so_rcv);
                        if (error) {
                                ssb_unlock(&so->so_rcv);
                                error = 0;
                                goto done;
                        }
                        m = so->so_rcv.ssb_mb;
                }
        }

        /*
         * If an atomic read was requested but unread data still remains
         * in the record, set MSG_TRUNC.
         */
        if (m && pr->pr_flags & PR_ATOMIC)
                flags |= MSG_TRUNC;

        /*
         * Cleanup.  If an atomic read was requested drop any unread data.
         */
        if ((flags & MSG_PEEK) == 0) {
                if (m && (pr->pr_flags & PR_ATOMIC))
                        sbdroprecord(&so->so_rcv.sb);
                if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
                        so_pru_rcvd(so, flags);
        }

        if (orig_resid == resid && orig_resid &&
            (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
                ssb_unlock(&so->so_rcv);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
release:
        ssb_unlock(&so->so_rcv);
done:
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (free_chain)
                m_freem(free_chain);
        return (error);
}
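/*
 * Usage sketch (editor's addition, compiled out): receiving a record
 * from soreceive() above as an mbuf chain instead of copying through a
 * uio.  With sio != NULL the resid is sb_climit - sb_cc, so the caller
 * bounds the read by presetting sb_climit; that setup is assumed to
 * have been done elsewhere.  Helper name is hypothetical.
 */
#if 0
static int
example_recv_chain(struct socket *so, struct sockbuf *sio)
{
        int flags = MSG_DONTWAIT;

        return (soreceive(so, NULL, NULL, sio, NULL, &flags));
}
#endif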
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
        struct mbuf *m, *n;
        struct mbuf *free_chain = NULL;
        int flags, len, error, offset;
        struct protosw *pr = so->so_proto;
        int moff;
        size_t resid, orig_resid;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = (size_t)(sio->sb_climit - sio->sb_cc);
        orig_resid = resid;

        if (psa)
                *psa = NULL;
        if (controlp)
                *controlp = NULL;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(MB_WAIT, MT_DATA);
                if (m == NULL)
                        return (ENOBUFS);
                error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
                if (error)
                        goto bad;
                if (sio) {
                        do {
                                sbappend(sio, m);
                                KKASSERT(resid >= (size_t)m->m_len);
                                resid -= (size_t)m->m_len;
                        } while (resid > 0 && m);
                } else {
                        do {
                                uio->uio_resid = resid;
                                error = uiomove(mtod(m, caddr_t),
                                    (int)szmin(resid, m->m_len),
                                    uio);
                                resid = uio->uio_resid;
                                m = m_free(m);
                        } while (uio->uio_resid && error == 0 && m);
                }
bad:
                if (m)
                        m_freem(m);
                return (error);
        }

        /*
         * The token interlocks against the protocol thread while
         * ssb_lock is a blocking lock against other userland entities.
         */
        lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
        error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
        if (error)
                goto done;

        m = so->so_rcv.ssb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *      receive operation at once if we block (resid <= hiwat).
         *   3. MSG_DONTWAIT is not set
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
            (size_t)so->so_rcv.ssb_cc < resid) &&
            (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
            ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
                KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (pr->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (resid == 0)
                        goto release;
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                ssb_unlock(&so->so_rcv);
                error = ssb_wait(&so->so_rcv);
                if (error)
                        goto done;
                goto restart;
        }
dontblock:
        if (uio && uio->uio_td && uio->uio_td->td_proc)
                uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

        /*
         * note: m should be == sb_mb here.  Cache the next record while
         * cleaning up.  Note that calling m_free*() will break out critical
         * section.
         */
        KKASSERT(m == so->so_rcv.ssb_mb);

        /*
         * Copy to the UIO or mbuf return chain (*mp).
         */
        moff = 0;
        offset = 0;
        while (m && resid > 0 && error == 0) {
                KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                    ("receive 3"));

                soclrstate(so, SS_RCVATMARK);
                len = (resid > INT_MAX) ? INT_MAX : resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;

                /*
                 * Copy out to the UIO or pass the mbufs back to the SIO.
                 * The SIO is dealt with when we eat the mbuf, but deal
                 * with the resid here either way.
                 */
                if (uio) {
                        uio->uio_resid = resid;
                        error = uiomove(mtod(m, caddr_t) + moff, len, uio);
                        resid = uio->uio_resid;
                        if (error)
                                goto release;
                } else {
                        resid -= (size_t)len;
                }

                /*
                 * Eat the entire mbuf or just a piece of it
                 */
                if (len == m->m_len - moff) {
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                if (sio) {
                                        n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                        sbappend(sio, m);
                                        m = n;
                                } else {
                                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                                }
                        }
                } else {
                        if (flags & MSG_PEEK) {
                                moff += len;
                        } else {
                                if (sio) {
                                        n = m_copym(m, 0, len, MB_WAIT);
                                        if (n)
                                                sbappend(sio, n);
                                }
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.ssb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        sosetstate(so, SS_RCVATMARK);
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until resid == 0 or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep signalsockbuf locked against other readers.
                 */
                while ((flags & MSG_WAITALL) && m == NULL &&
                    resid > 0 && !sosendallatonce(so) &&
                    so->so_rcv.ssb_mb == NULL) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        /*
                         * The window might have closed to zero, make
                         * sure we send an ack now that we've drained
                         * the buffer or we might end up blocking until
                         * the idle takes over (5 seconds).
                         */
                        if (so->so_pcb)
                                so_pru_rcvd_async(so);
                        error = ssb_wait(&so->so_rcv);
                        if (error) {
                                ssb_unlock(&so->so_rcv);
                                error = 0;
                                goto done;
                        }
                        m = so->so_rcv.ssb_mb;
                }
        }

        /*
         * Cleanup.  If an atomic read was requested drop any unread data.
         */
        if ((flags & MSG_PEEK) == 0) {
                if (so->so_pcb)
                        so_pru_rcvd_async(so);
        }

        if (orig_resid == resid && orig_resid &&
            (so->so_state & SS_CANTRCVMORE) == 0) {
                ssb_unlock(&so->so_rcv);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
release:
        ssb_unlock(&so->so_rcv);
done:
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (free_chain)
                m_freem(free_chain);
        return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
        if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
                return (EINVAL);

        if (how != SHUT_WR) {
                /*ssb_lock(&so->so_rcv, M_WAITOK);*/
                sorflush(so);
                /*ssb_unlock(&so->so_rcv);*/
        }
        if (how != SHUT_RD)
                return (so_pru_shutdown(so));
        return (0);
}

void
sorflush(struct socket *so)
{
        struct signalsockbuf *ssb = &so->so_rcv;
        struct protosw *pr = so->so_proto;
        struct signalsockbuf asb;

        atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

        lwkt_gettoken(&ssb->ssb_token);
        socantrcvmore(so);
        asb = *ssb;

        /*
         * Can't just blow up the ssb structure here
         */
        bzero(&ssb->sb, sizeof(ssb->sb));
        ssb->ssb_timeo = 0;
        ssb->ssb_lowat = 0;
        ssb->ssb_hiwat = 0;
        ssb->ssb_mbmax = 0;
        atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

        if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
                (*pr->pr_domain->dom_dispose)(asb.ssb_mb);
        ssb_release(&asb, so);

        lwkt_reltoken(&ssb->ssb_token);
}
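/*
 * Editor's sketch (compiled out): since soshutdown() above takes no
 * frontend lock, one thread can use it to flush the receive side and
 * wake another thread blocked in soreceive().  Helper name is
 * hypothetical.
 */
#if 0
static void
example_wake_blocked_reader(struct socket *so)
{
        (void)soshutdown(so, SHUT_RD);  /* sorflush() wakes the reader */
}
#endif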
#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
        struct accept_filter_arg *afap = NULL;
        struct accept_filter *afp;
        struct so_accf *af = so->so_accf;
        int error = 0;

        /* do not set/remove accept filters on non listen sockets */
        if ((so->so_options & SO_ACCEPTCONN) == 0) {
                error = EINVAL;
                goto out;
        }

        /* removing the filter */
        if (sopt == NULL) {
                if (af != NULL) {
                        if (af->so_accept_filter != NULL &&
                            af->so_accept_filter->accf_destroy != NULL) {
                                af->so_accept_filter->accf_destroy(so);
                        }
                        if (af->so_accept_filter_str != NULL) {
                                kfree(af->so_accept_filter_str, M_ACCF);
                        }
                        kfree(af, M_ACCF);
                        so->so_accf = NULL;
                }
                so->so_options &= ~SO_ACCEPTFILTER;
                return (0);
        }
        /* adding a filter */
        /* must remove previous filter first */
        if (af != NULL) {
                error = EINVAL;
                goto out;
        }
        /* don't put large objects on the kernel stack */
        afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
        error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
        afap->af_name[sizeof(afap->af_name)-1] = '\0';
        afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
        if (error)
                goto out;
        afp = accept_filt_get(afap->af_name);
        if (afp == NULL) {
                error = ENOENT;
                goto out;
        }
        af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
        if (afp->accf_create != NULL) {
                if (afap->af_name[0] != '\0') {
                        int len = strlen(afap->af_name) + 1;

                        af->so_accept_filter_str = kmalloc(len, M_ACCF,
                            M_WAITOK);
                        strcpy(af->so_accept_filter_str, afap->af_name);
                }
                af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
                if (af->so_accept_filter_arg == NULL) {
                        kfree(af->so_accept_filter_str, M_ACCF);
                        kfree(af, M_ACCF);
                        so->so_accf = NULL;
                        error = EINVAL;
                        goto out;
                }
        }
        af->so_accept_filter = afp;
        so->so_accf = af;
        so->so_options |= SO_ACCEPTFILTER;
out:
        if (afap != NULL)
                kfree(afap, M_TEMP);
        return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
        return soopt_to_kbuf(sopt, buf, len, minlen);
}
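/*
 * Usage sketch (editor's addition, compiled out): the common pattern in
 * a protocol pr_ctloutput() handler, pulling a fixed-size integer
 * option into kernel space with sooptcopyin() above.  Anything shorter
 * than minlen is rejected with EINVAL; anything longer is truncated.
 * Helper name is hypothetical.
 */
#if 0
static int
example_ctloutput_set_int(struct sockopt *sopt, int *valp)
{
        return (sooptcopyin(sopt, valp, sizeof(*valp), sizeof(*valp)));
}
#endif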
int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
        size_t valsize;

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
        KKASSERT(kva_p(buf));

        /*
         * If the user gives us more than we wanted, we ignore it,
         * but if we don't get the minimum length the caller
         * wants, we return EINVAL.  On success, sopt->sopt_valsize
         * is set to however much we actually retrieved.
         */
        if ((valsize = sopt->sopt_valsize) < minlen)
                return EINVAL;
        if (valsize > len)
                sopt->sopt_valsize = valsize = len;

        bcopy(sopt->sopt_val, buf, valsize);
        return 0;
}


int
sosetopt(struct socket *so, struct sockopt *sopt)
{
        int error, optval;
        struct linger l;
        struct timeval tv;
        u_long val;
        struct signalsockbuf *sotmp;

        error = 0;
        sopt->sopt_dir = SOPT_SET;
        if (sopt->sopt_level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput) {
                        return (so_pr_ctloutput(so, sopt));
                }
                error = ENOPROTOOPT;
        } else {
                switch (sopt->sopt_name) {
#ifdef INET
                case SO_ACCEPTFILTER:
                        error = do_setopt_accept_filter(so, sopt);
                        if (error)
                                goto bad;
                        break;
#endif /* INET */
                case SO_LINGER:
                        error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
                        if (error)
                                goto bad;

                        so->so_linger = l.l_linger;
                        if (l.l_onoff)
                                so->so_options |= SO_LINGER;
                        else
                                so->so_options &= ~SO_LINGER;
                        break;

                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_DONTROUTE:
                case SO_USELOOPBACK:
                case SO_BROADCAST:
                case SO_REUSEADDR:
                case SO_REUSEPORT:
                case SO_OOBINLINE:
                case SO_TIMESTAMP:
                case SO_NOSIGPIPE:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                goto bad;
                        if (optval)
                                so->so_options |= sopt->sopt_name;
                        else
                                so->so_options &= ~sopt->sopt_name;
                        break;

                case SO_SNDBUF:
                case SO_RCVBUF:
                case SO_SNDLOWAT:
                case SO_RCVLOWAT:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                goto bad;

                        /*
                         * Values < 1 make no sense for any of these
                         * options, so disallow them.
                         */
                        if (optval < 1) {
                                error = EINVAL;
                                goto bad;
                        }

                        switch (sopt->sopt_name) {
                        case SO_SNDBUF:
                        case SO_RCVBUF:
                                if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
                                    &so->so_snd : &so->so_rcv, (u_long)optval,
                                    so,
                                    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
                                        error = ENOBUFS;
                                        goto bad;
                                }
                                sotmp = (sopt->sopt_name == SO_SNDBUF) ?
                                    &so->so_snd : &so->so_rcv;
                                atomic_clear_int(&sotmp->ssb_flags,
                                    SSB_AUTOSIZE);
                                break;

                        /*
                         * Make sure the low-water is never greater than
                         * the high-water.
                         */
                        case SO_SNDLOWAT:
                                so->so_snd.ssb_lowat =
                                    (optval > so->so_snd.ssb_hiwat) ?
                                    so->so_snd.ssb_hiwat : optval;
                                atomic_clear_int(&so->so_snd.ssb_flags,
                                    SSB_AUTOLOWAT);
                                break;

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
						&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
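
/*
 * Illustrative note on the timeout conversion above: socket timeouts are
 * stored in clock ticks.  With, say, hz = 100 (ustick = 10000 us/tick),
 * a SO_RCVTIMEO request of { tv_sec = 2, tv_usec = 500000 } is stored as
 * 2 * 100 + 500000 / 10000 = 250 ticks, and sogetopt() below converts
 * those 250 ticks back into 2 seconds and 500000 microseconds.
 */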

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
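
/*
 * Illustrative only: a userland query such as
 *
 *	int sndbuf;
 *	socklen_t len = sizeof(sndbuf);
 *	getsockopt(s, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
 *
 * is answered by the SO_SNDBUF case in sogetopt() above, which reports
 * the current send buffer high-water mark (so_snd.ssb_hiwat).
 */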

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* the chain should have been allocated large enough at ip6_sooptmcopyin() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough sockopt buffer should be supplied from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
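
/*
 * Illustrative only: the mbuf-based helpers above exist for older,
 * pre-FreeBSD-3 style ctloutput code; an assumed "set" path looks
 * roughly like
 *
 *	struct mbuf *m;
 *	error = soopt_getm(sopt, &m);		allocate a large enough chain
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	copy the option data into it
 *
 * with soopt_mcopyout() used in the opposite direction for get requests.
 */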

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}
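
/*
 * Illustrative only: userland reaches the filters above through
 * kqueue(2)/kevent(2), e.g.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * which lands in sokqfilter(); for a listening socket the EVFILT_READ
 * case selects solisten_filtops, so filt_solisten() reports pending
 * connections via so_qlen.
 */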