/*
 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static void	sodiscard(struct socket *so);
static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");


static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
161 */ 162 struct socket * 163 soalloc(int waitok, struct protosw *pr) 164 { 165 struct socket *so; 166 unsigned waitmask; 167 168 waitmask = waitok ? M_WAITOK : M_NOWAIT; 169 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 170 if (so) { 171 /* XXX race condition for reentrant kernel */ 172 so->so_proto = pr; 173 TAILQ_INIT(&so->so_aiojobq); 174 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist); 175 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist); 176 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 177 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 178 spin_init(&so->so_rcvd_spin); 179 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 180 MSGF_DROPABLE | MSGF_PRIORITY, 181 so->so_proto->pr_usrreqs->pru_rcvd); 182 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 183 so->so_state = SS_NOFDREF; 184 so->so_refs = 1; 185 } 186 return so; 187 } 188 189 int 190 socreate(int dom, struct socket **aso, int type, 191 int proto, struct thread *td) 192 { 193 struct proc *p = td->td_proc; 194 struct protosw *prp; 195 struct socket *so; 196 struct pru_attach_info ai; 197 int error; 198 199 if (proto) 200 prp = pffindproto(dom, proto, type); 201 else 202 prp = pffindtype(dom, type); 203 204 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 205 return (EPROTONOSUPPORT); 206 207 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 208 prp->pr_domain->dom_family != PF_LOCAL && 209 prp->pr_domain->dom_family != PF_INET && 210 prp->pr_domain->dom_family != PF_INET6 && 211 prp->pr_domain->dom_family != PF_ROUTE) { 212 return (EPROTONOSUPPORT); 213 } 214 215 if (prp->pr_type != type) 216 return (EPROTOTYPE); 217 so = soalloc(p != NULL, prp); 218 if (so == NULL) 219 return (ENOBUFS); 220 221 /* 222 * Callers of socreate() presumably will connect up a descriptor 223 * and call soclose() if they cannot. This represents our so_refs 224 * (which should be 1) from soalloc(). 225 */ 226 soclrstate(so, SS_NOFDREF); 227 228 /* 229 * Set a default port for protocol processing. No action will occur 230 * on the socket on this port until an inpcb is attached to it and 231 * is able to match incoming packets, or until the socket becomes 232 * available to userland. 233 * 234 * We normally default the socket to the protocol thread on cpu 0. 235 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 236 * thread and all pr_*()/pru_*() calls are executed synchronously. 237 */ 238 if (prp->pr_flags & PR_SYNC_PORT) 239 so->so_port = &netisr_sync_port; 240 else 241 so->so_port = netisr_cpuport(0); 242 243 TAILQ_INIT(&so->so_incomp); 244 TAILQ_INIT(&so->so_comp); 245 so->so_type = type; 246 so->so_cred = crhold(p->p_ucred); 247 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 248 ai.p_ucred = p->p_ucred; 249 ai.fd_rdir = p->p_fd->fd_rdir; 250 251 /* 252 * Auto-sizing of socket buffers is managed by the protocols and 253 * the appropriate flags must be set in the pru_attach function. 254 */ 255 error = so_pru_attach(so, proto, &ai); 256 if (error) { 257 sosetstate(so, SS_NOFDREF); 258 sofree(so); /* from soalloc */ 259 return error; 260 } 261 262 /* 263 * NOTE: Returns referenced socket. 
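 *
 * Hypothetical caller-side sketch (names such as "nam" and the fflag
 * value are illustrative, not taken from an in-tree caller).  As noted
 * above, a caller that cannot hook the new socket up to a descriptor is
 * expected to soclose() the reference returned here:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 *	if (error == 0 && (error = sobind(so, nam, td)) != 0)
 *		soclose(so, FNONBLOCK);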
264 */ 265 *aso = so; 266 return (0); 267 } 268 269 int 270 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 271 { 272 int error; 273 274 error = so_pru_bind(so, nam, td); 275 return (error); 276 } 277 278 static void 279 sodealloc(struct socket *so) 280 { 281 if (so->so_rcv.ssb_hiwat) 282 (void)chgsbsize(so->so_cred->cr_uidinfo, 283 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 284 if (so->so_snd.ssb_hiwat) 285 (void)chgsbsize(so->so_cred->cr_uidinfo, 286 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 287 #ifdef INET 288 /* remove accept filter if present */ 289 if (so->so_accf != NULL) 290 do_setopt_accept_filter(so, NULL); 291 #endif /* INET */ 292 crfree(so->so_cred); 293 if (so->so_faddr != NULL) 294 kfree(so->so_faddr, M_SONAME); 295 kfree(so, M_SOCKET); 296 } 297 298 int 299 solisten(struct socket *so, int backlog, struct thread *td) 300 { 301 int error; 302 #ifdef SCTP 303 short oldopt, oldqlimit; 304 #endif /* SCTP */ 305 306 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 307 return (EINVAL); 308 309 #ifdef SCTP 310 oldopt = so->so_options; 311 oldqlimit = so->so_qlimit; 312 #endif /* SCTP */ 313 314 lwkt_gettoken(&so->so_rcv.ssb_token); 315 if (TAILQ_EMPTY(&so->so_comp)) 316 so->so_options |= SO_ACCEPTCONN; 317 lwkt_reltoken(&so->so_rcv.ssb_token); 318 if (backlog < 0 || backlog > somaxconn) 319 backlog = somaxconn; 320 so->so_qlimit = backlog; 321 /* SCTP needs to look at tweak both the inbound backlog parameter AND 322 * the so_options (UDP model both connect's and gets inbound 323 * connections .. implicitly). 324 */ 325 error = so_pru_listen(so, td); 326 if (error) { 327 #ifdef SCTP 328 /* Restore the params */ 329 so->so_options = oldopt; 330 so->so_qlimit = oldqlimit; 331 #endif /* SCTP */ 332 return (error); 333 } 334 return (0); 335 } 336 337 /* 338 * Destroy a disconnected socket. This routine is a NOP if entities 339 * still have a reference on the socket: 340 * 341 * so_pcb - The protocol stack still has a reference 342 * SS_NOFDREF - There is no longer a file pointer reference 343 */ 344 void 345 sofree(struct socket *so) 346 { 347 struct socket *head; 348 349 /* 350 * This is a bit hackish at the moment. We need to interlock 351 * any accept queue we are on before we potentially lose the 352 * last reference to avoid races against a re-reference from 353 * someone operating on the queue. 354 */ 355 while ((head = so->so_head) != NULL) { 356 lwkt_getpooltoken(head); 357 if (so->so_head == head) 358 break; 359 lwkt_relpooltoken(head); 360 } 361 362 /* 363 * Arbitrage the last free. 364 */ 365 KKASSERT(so->so_refs > 0); 366 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 367 if (head) 368 lwkt_relpooltoken(head); 369 return; 370 } 371 372 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 373 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 374 375 /* 376 * We're done, remove ourselves from the accept queue we are 377 * on, if we are on one. 378 */ 379 if (head != NULL) { 380 if (so->so_state & SS_INCOMP) { 381 TAILQ_REMOVE(&head->so_incomp, so, so_list); 382 head->so_incqlen--; 383 } else if (so->so_state & SS_COMP) { 384 /* 385 * We must not decommission a socket that's 386 * on the accept(2) queue. If we do, then 387 * accept(2) may hang after select(2) indicated 388 * that the listening socket was ready. 
389 */ 390 lwkt_relpooltoken(head); 391 return; 392 } else { 393 panic("sofree: not queued"); 394 } 395 soclrstate(so, SS_INCOMP); 396 so->so_head = NULL; 397 lwkt_relpooltoken(head); 398 } 399 ssb_release(&so->so_snd, so); 400 sorflush(so); 401 sodealloc(so); 402 } 403 404 /* 405 * Close a socket on last file table reference removal. 406 * Initiate disconnect if connected. 407 * Free socket when disconnect complete. 408 */ 409 int 410 soclose(struct socket *so, int fflag) 411 { 412 int error; 413 414 funsetown(&so->so_sigio); 415 if (!use_soclose_fast || 416 (so->so_proto->pr_flags & PR_SYNC_PORT) || 417 (so->so_options & SO_LINGER)) { 418 error = soclose_sync(so, fflag); 419 } else { 420 soclose_fast(so); 421 error = 0; 422 } 423 return error; 424 } 425 426 static void 427 sodiscard(struct socket *so) 428 { 429 lwkt_getpooltoken(so); 430 if (so->so_options & SO_ACCEPTCONN) { 431 struct socket *sp; 432 433 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 434 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 435 soclrstate(sp, SS_INCOMP); 436 sp->so_head = NULL; 437 so->so_incqlen--; 438 soaborta(sp); 439 } 440 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 441 TAILQ_REMOVE(&so->so_comp, sp, so_list); 442 soclrstate(sp, SS_COMP); 443 sp->so_head = NULL; 444 so->so_qlen--; 445 soaborta(sp); 446 } 447 } 448 lwkt_relpooltoken(so); 449 450 if (so->so_state & SS_NOFDREF) 451 panic("soclose: NOFDREF"); 452 sosetstate(so, SS_NOFDREF); /* take ref */ 453 } 454 455 static int 456 soclose_sync(struct socket *so, int fflag) 457 { 458 int error = 0; 459 460 if (so->so_pcb == NULL) 461 goto discard; 462 if (so->so_state & SS_ISCONNECTED) { 463 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 464 error = sodisconnect(so); 465 if (error) 466 goto drop; 467 } 468 if (so->so_options & SO_LINGER) { 469 if ((so->so_state & SS_ISDISCONNECTING) && 470 (fflag & FNONBLOCK)) 471 goto drop; 472 while (so->so_state & SS_ISCONNECTED) { 473 error = tsleep(&so->so_timeo, PCATCH, 474 "soclos", so->so_linger * hz); 475 if (error) 476 break; 477 } 478 } 479 } 480 drop: 481 if (so->so_pcb) { 482 int error2; 483 484 error2 = so_pru_detach(so); 485 if (error == 0) 486 error = error2; 487 } 488 discard: 489 sodiscard(so); 490 so_pru_sync(so); /* unpend async sending */ 491 sofree(so); /* dispose of ref */ 492 493 return (error); 494 } 495 496 static void 497 soclose_sofree_async_handler(netmsg_t msg) 498 { 499 sofree(msg->base.nm_so); 500 } 501 502 static void 503 soclose_sofree_async(struct socket *so) 504 { 505 struct netmsg_base *base = &so->so_clomsg; 506 507 netmsg_init(base, so, &netisr_apanic_rport, 0, 508 soclose_sofree_async_handler); 509 lwkt_sendmsg(so->so_port, &base->lmsg); 510 } 511 512 static void 513 soclose_disconn_async_handler(netmsg_t msg) 514 { 515 struct socket *so = msg->base.nm_so; 516 517 if ((so->so_state & SS_ISCONNECTED) && 518 (so->so_state & SS_ISDISCONNECTING) == 0) 519 so_pru_disconnect_direct(so); 520 521 if (so->so_pcb) 522 so_pru_detach_direct(so); 523 524 sodiscard(so); 525 sofree(so); 526 } 527 528 static void 529 soclose_disconn_async(struct socket *so) 530 { 531 struct netmsg_base *base = &so->so_clomsg; 532 533 netmsg_init(base, so, &netisr_apanic_rport, 0, 534 soclose_disconn_async_handler); 535 lwkt_sendmsg(so->so_port, &base->lmsg); 536 } 537 538 static void 539 soclose_detach_async_handler(netmsg_t msg) 540 { 541 struct socket *so = msg->base.nm_so; 542 543 if (so->so_pcb) 544 so_pru_detach_direct(so); 545 546 sodiscard(so); 547 sofree(so); 548 } 549 550 static void 551 
soclose_detach_async(struct socket *so) 552 { 553 struct netmsg_base *base = &so->so_clomsg; 554 555 netmsg_init(base, so, &netisr_apanic_rport, 0, 556 soclose_detach_async_handler); 557 lwkt_sendmsg(so->so_port, &base->lmsg); 558 } 559 560 static void 561 soclose_fast(struct socket *so) 562 { 563 if (so->so_pcb == NULL) 564 goto discard; 565 566 if ((so->so_state & SS_ISCONNECTED) && 567 (so->so_state & SS_ISDISCONNECTING) == 0) { 568 soclose_disconn_async(so); 569 return; 570 } 571 572 if (so->so_pcb) { 573 soclose_detach_async(so); 574 return; 575 } 576 577 discard: 578 sodiscard(so); 579 soclose_sofree_async(so); 580 } 581 582 /* 583 * Abort and destroy a socket. Only one abort can be in progress 584 * at any given moment. 585 */ 586 void 587 soabort(struct socket *so) 588 { 589 soreference(so); 590 so_pru_abort(so); 591 } 592 593 void 594 soaborta(struct socket *so) 595 { 596 soreference(so); 597 so_pru_aborta(so); 598 } 599 600 void 601 soabort_oncpu(struct socket *so) 602 { 603 soreference(so); 604 so_pru_abort_oncpu(so); 605 } 606 607 /* 608 * so is passed in ref'd, which becomes owned by 609 * the cleared SS_NOFDREF flag. 610 */ 611 void 612 soaccept_generic(struct socket *so) 613 { 614 if ((so->so_state & SS_NOFDREF) == 0) 615 panic("soaccept: !NOFDREF"); 616 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 617 } 618 619 int 620 soaccept(struct socket *so, struct sockaddr **nam) 621 { 622 int error; 623 624 soaccept_generic(so); 625 error = so_pru_accept(so, nam); 626 return (error); 627 } 628 629 int 630 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) 631 { 632 int error; 633 634 if (so->so_options & SO_ACCEPTCONN) 635 return (EOPNOTSUPP); 636 /* 637 * If protocol is connection-based, can only connect once. 638 * Otherwise, if connected, try to disconnect first. 639 * This allows user to disconnect by connecting to, e.g., 640 * a null address. 641 */ 642 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 643 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 644 (error = sodisconnect(so)))) { 645 error = EISCONN; 646 } else { 647 /* 648 * Prevent accumulated error from previous connection 649 * from biting us. 650 */ 651 so->so_error = 0; 652 error = so_pru_connect(so, nam, td); 653 } 654 return (error); 655 } 656 657 int 658 soconnect2(struct socket *so1, struct socket *so2) 659 { 660 int error; 661 662 error = so_pru_connect2(so1, so2); 663 return (error); 664 } 665 666 int 667 sodisconnect(struct socket *so) 668 { 669 int error; 670 671 if ((so->so_state & SS_ISCONNECTED) == 0) { 672 error = ENOTCONN; 673 goto bad; 674 } 675 if (so->so_state & SS_ISDISCONNECTING) { 676 error = EALREADY; 677 goto bad; 678 } 679 error = so_pru_disconnect(so); 680 bad: 681 return (error); 682 } 683 684 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 685 /* 686 * Send on a socket. 687 * If send must go all at once and message is larger than 688 * send buffering, then hard error. 689 * Lock against other senders. 690 * If must go all at once and not enough room now, then 691 * inform user that this would block and do nothing. 692 * Otherwise, if nonblocking, send as much as possible. 693 * The data to be sent is described by "uio" if nonzero, 694 * otherwise by the mbuf chain "top" (which must be null 695 * if uio is not). Data provided in mbuf chain must be small 696 * enough to send all at once. 697 * 698 * Returns nonzero on error, timeout or signal; callers 699 * must check for short counts if EINTR/ERESTART are returned. 
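 *
 * Hypothetical caller-side sketch of that rule ("auio" and "nbytes" are
 * illustrative caller locals, not part of this interface): data already
 * moved before the interruption is reported as a short count rather than
 * as an error.
 *
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 *	if ((error == EINTR || error == ERESTART) &&
 *	    auio.uio_resid != nbytes)
 *		error = 0;
 *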
700 * Data and control buffers are freed on return. 701 */ 702 int 703 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 704 struct mbuf *top, struct mbuf *control, int flags, 705 struct thread *td) 706 { 707 struct mbuf **mp; 708 struct mbuf *m; 709 size_t resid; 710 int space, len; 711 int clen = 0, error, dontroute, mlen; 712 int atomic = sosendallatonce(so) || top; 713 int pru_flags; 714 715 if (uio) { 716 resid = uio->uio_resid; 717 } else { 718 resid = (size_t)top->m_pkthdr.len; 719 #ifdef INVARIANTS 720 len = 0; 721 for (m = top; m; m = m->m_next) 722 len += m->m_len; 723 KKASSERT(top->m_pkthdr.len == len); 724 #endif 725 } 726 727 /* 728 * WARNING! resid is unsigned, space and len are signed. space 729 * can wind up negative if the sockbuf is overcommitted. 730 * 731 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 732 * type sockets since that's an error. 733 */ 734 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 735 error = EINVAL; 736 goto out; 737 } 738 739 dontroute = 740 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 741 (so->so_proto->pr_flags & PR_ATOMIC); 742 if (td->td_lwp != NULL) 743 td->td_lwp->lwp_ru.ru_msgsnd++; 744 if (control) 745 clen = control->m_len; 746 #define gotoerr(errcode) { error = errcode; goto release; } 747 748 restart: 749 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 750 if (error) 751 goto out; 752 753 do { 754 if (so->so_state & SS_CANTSENDMORE) 755 gotoerr(EPIPE); 756 if (so->so_error) { 757 error = so->so_error; 758 so->so_error = 0; 759 goto release; 760 } 761 if ((so->so_state & SS_ISCONNECTED) == 0) { 762 /* 763 * `sendto' and `sendmsg' is allowed on a connection- 764 * based socket if it supports implied connect. 765 * Return ENOTCONN if not connected and no address is 766 * supplied. 767 */ 768 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 769 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 770 if ((so->so_state & SS_ISCONFIRMING) == 0 && 771 !(resid == 0 && clen != 0)) 772 gotoerr(ENOTCONN); 773 } else if (addr == NULL) 774 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 775 ENOTCONN : EDESTADDRREQ); 776 } 777 if ((atomic && resid > so->so_snd.ssb_hiwat) || 778 clen > so->so_snd.ssb_hiwat) { 779 gotoerr(EMSGSIZE); 780 } 781 space = ssb_space(&so->so_snd); 782 if (flags & MSG_OOB) 783 space += 1024; 784 if ((space < 0 || (size_t)space < resid + clen) && uio && 785 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 786 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 787 gotoerr(EWOULDBLOCK); 788 ssb_unlock(&so->so_snd); 789 error = ssb_wait(&so->so_snd); 790 if (error) 791 goto out; 792 goto restart; 793 } 794 mp = ⊤ 795 space -= clen; 796 do { 797 if (uio == NULL) { 798 /* 799 * Data is prepackaged in "top". 800 */ 801 resid = 0; 802 if (flags & MSG_EOR) 803 top->m_flags |= M_EOR; 804 } else do { 805 if (resid > INT_MAX) 806 resid = INT_MAX; 807 m = m_getl((int)resid, MB_WAIT, MT_DATA, 808 top == NULL ? M_PKTHDR : 0, &mlen); 809 if (top == NULL) { 810 m->m_pkthdr.len = 0; 811 m->m_pkthdr.rcvif = NULL; 812 } 813 len = imin((int)szmin(mlen, resid), space); 814 if (resid < MINCLSIZE) { 815 /* 816 * For datagram protocols, leave room 817 * for protocol headers in first mbuf. 
818 */ 819 if (atomic && top == NULL && len < mlen) 820 MH_ALIGN(m, len); 821 } 822 space -= len; 823 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 824 resid = uio->uio_resid; 825 m->m_len = len; 826 *mp = m; 827 top->m_pkthdr.len += len; 828 if (error) 829 goto release; 830 mp = &m->m_next; 831 if (resid == 0) { 832 if (flags & MSG_EOR) 833 top->m_flags |= M_EOR; 834 break; 835 } 836 } while (space > 0 && atomic); 837 if (dontroute) 838 so->so_options |= SO_DONTROUTE; 839 if (flags & MSG_OOB) { 840 pru_flags = PRUS_OOB; 841 } else if ((flags & MSG_EOF) && 842 (so->so_proto->pr_flags & PR_IMPLOPCL) && 843 (resid == 0)) { 844 /* 845 * If the user set MSG_EOF, the protocol 846 * understands this flag and nothing left to 847 * send then use PRU_SEND_EOF instead of PRU_SEND. 848 */ 849 pru_flags = PRUS_EOF; 850 } else if (resid > 0 && space > 0) { 851 /* If there is more to send, set PRUS_MORETOCOME */ 852 pru_flags = PRUS_MORETOCOME; 853 } else { 854 pru_flags = 0; 855 } 856 /* 857 * XXX all the SS_CANTSENDMORE checks previously 858 * done could be out of date. We could have recieved 859 * a reset packet in an interrupt or maybe we slept 860 * while doing page faults in uiomove() etc. We could 861 * probably recheck again inside the splnet() protection 862 * here, but there are probably other places that this 863 * also happens. We must rethink this. 864 */ 865 error = so_pru_send(so, pru_flags, top, addr, control, td); 866 if (dontroute) 867 so->so_options &= ~SO_DONTROUTE; 868 clen = 0; 869 control = NULL; 870 top = NULL; 871 mp = ⊤ 872 if (error) 873 goto release; 874 } while (resid && space > 0); 875 } while (resid); 876 877 release: 878 ssb_unlock(&so->so_snd); 879 out: 880 if (top) 881 m_freem(top); 882 if (control) 883 m_freem(control); 884 return (error); 885 } 886 887 #ifdef INET 888 /* 889 * A specialization of sosend() for UDP based on protocol-specific knowledge: 890 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 891 * sosendallatonce() returns true, 892 * the "atomic" variable is true, 893 * and sosendudp() blocks until space is available for the entire send. 894 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 895 * PR_IMPLOPCL flags set. 896 * UDP has no out-of-band data. 897 * UDP has no control data. 898 * UDP does not support MSG_EOR. 899 */ 900 int 901 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 902 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 903 { 904 size_t resid; 905 int error, pru_flags = 0; 906 int space; 907 908 if (td->td_lwp != NULL) 909 td->td_lwp->lwp_ru.ru_msgsnd++; 910 if (control) 911 m_freem(control); 912 913 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 914 resid = uio ? 
uio->uio_resid : (size_t)top->m_pkthdr.len; 915 916 restart: 917 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 918 if (error) 919 goto out; 920 921 if (so->so_state & SS_CANTSENDMORE) 922 gotoerr(EPIPE); 923 if (so->so_error) { 924 error = so->so_error; 925 so->so_error = 0; 926 goto release; 927 } 928 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 929 gotoerr(EDESTADDRREQ); 930 if (resid > so->so_snd.ssb_hiwat) 931 gotoerr(EMSGSIZE); 932 space = ssb_space(&so->so_snd); 933 if (uio && (space < 0 || (size_t)space < resid)) { 934 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 935 gotoerr(EWOULDBLOCK); 936 ssb_unlock(&so->so_snd); 937 error = ssb_wait(&so->so_snd); 938 if (error) 939 goto out; 940 goto restart; 941 } 942 943 if (uio) { 944 int hdrlen = max_hdr; 945 946 /* 947 * We try to optimize out the additional mbuf 948 * allocations in M_PREPEND() on output path, e.g. 949 * - udp_output(), when it tries to prepend protocol 950 * headers. 951 * - Link layer output function, when it tries to 952 * prepend link layer header. 953 * 954 * This probably will not benefit any data that will 955 * be fragmented, so this optimization is only performed 956 * when the size of data and max size of protocol+link 957 * headers fit into one mbuf cluster. 958 */ 959 if (uio->uio_resid > MCLBYTES - hdrlen || 960 !udp_sosend_prepend) { 961 top = m_uiomove(uio); 962 if (top == NULL) 963 goto release; 964 } else { 965 int nsize; 966 967 top = m_getl(uio->uio_resid + hdrlen, MB_WAIT, 968 MT_DATA, M_PKTHDR, &nsize); 969 KASSERT(nsize >= uio->uio_resid + hdrlen, 970 ("sosendudp invalid nsize %d, " 971 "resid %zu, hdrlen %d", 972 nsize, uio->uio_resid, hdrlen)); 973 974 top->m_len = uio->uio_resid; 975 top->m_pkthdr.len = uio->uio_resid; 976 top->m_data += hdrlen; 977 978 error = uiomove(mtod(top, caddr_t), top->m_len, uio); 979 if (error) 980 goto out; 981 } 982 } 983 984 if (flags & MSG_DONTROUTE) 985 pru_flags |= PRUS_DONTROUTE; 986 987 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 988 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 989 error = 0; 990 } else { 991 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 992 } 993 top = NULL; /* sent or freed in lower layer */ 994 995 release: 996 ssb_unlock(&so->so_snd); 997 out: 998 if (top) 999 m_freem(top); 1000 return (error); 1001 } 1002 1003 int 1004 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 1005 struct mbuf *top, struct mbuf *control, int flags, 1006 struct thread *td) 1007 { 1008 struct mbuf **mp; 1009 struct mbuf *m; 1010 size_t resid; 1011 int space, len; 1012 int error, mlen; 1013 int allatonce; 1014 int pru_flags; 1015 1016 if (uio) { 1017 KKASSERT(top == NULL); 1018 allatonce = 0; 1019 resid = uio->uio_resid; 1020 } else { 1021 allatonce = 1; 1022 resid = (size_t)top->m_pkthdr.len; 1023 #ifdef INVARIANTS 1024 len = 0; 1025 for (m = top; m; m = m->m_next) 1026 len += m->m_len; 1027 KKASSERT(top->m_pkthdr.len == len); 1028 #endif 1029 } 1030 1031 /* 1032 * WARNING! resid is unsigned, space and len are signed. space 1033 * can wind up negative if the sockbuf is overcommitted. 
1034 * 1035 * Also check to make sure that MSG_EOR isn't used on TCP 1036 */ 1037 if (flags & MSG_EOR) { 1038 error = EINVAL; 1039 goto out; 1040 } 1041 1042 if (control) { 1043 /* TCP doesn't do control messages (rights, creds, etc) */ 1044 if (control->m_len) { 1045 error = EINVAL; 1046 goto out; 1047 } 1048 m_freem(control); /* empty control, just free it */ 1049 control = NULL; 1050 } 1051 1052 if (td->td_lwp != NULL) 1053 td->td_lwp->lwp_ru.ru_msgsnd++; 1054 1055 #define gotoerr(errcode) { error = errcode; goto release; } 1056 1057 restart: 1058 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1059 if (error) 1060 goto out; 1061 1062 do { 1063 if (so->so_state & SS_CANTSENDMORE) 1064 gotoerr(EPIPE); 1065 if (so->so_error) { 1066 error = so->so_error; 1067 so->so_error = 0; 1068 goto release; 1069 } 1070 if ((so->so_state & SS_ISCONNECTED) == 0 && 1071 (so->so_state & SS_ISCONFIRMING) == 0) 1072 gotoerr(ENOTCONN); 1073 if (allatonce && resid > so->so_snd.ssb_hiwat) 1074 gotoerr(EMSGSIZE); 1075 1076 space = ssb_space_prealloc(&so->so_snd); 1077 if (flags & MSG_OOB) 1078 space += 1024; 1079 if ((space < 0 || (size_t)space < resid) && !allatonce && 1080 space < so->so_snd.ssb_lowat) { 1081 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1082 gotoerr(EWOULDBLOCK); 1083 ssb_unlock(&so->so_snd); 1084 error = ssb_wait(&so->so_snd); 1085 if (error) 1086 goto out; 1087 goto restart; 1088 } 1089 mp = ⊤ 1090 do { 1091 int cnt = 0, async = 0; 1092 1093 if (uio == NULL) { 1094 /* 1095 * Data is prepackaged in "top". 1096 */ 1097 resid = 0; 1098 } else do { 1099 if (resid > INT_MAX) 1100 resid = INT_MAX; 1101 m = m_getl((int)resid, MB_WAIT, MT_DATA, 1102 top == NULL ? M_PKTHDR : 0, &mlen); 1103 if (top == NULL) { 1104 m->m_pkthdr.len = 0; 1105 m->m_pkthdr.rcvif = NULL; 1106 } 1107 len = imin((int)szmin(mlen, resid), space); 1108 space -= len; 1109 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1110 resid = uio->uio_resid; 1111 m->m_len = len; 1112 *mp = m; 1113 top->m_pkthdr.len += len; 1114 if (error) 1115 goto release; 1116 mp = &m->m_next; 1117 if (resid == 0) 1118 break; 1119 ++cnt; 1120 } while (space > 0 && cnt < tcp_sosend_agglim); 1121 1122 if (tcp_sosend_async) 1123 async = 1; 1124 1125 if (flags & MSG_OOB) { 1126 pru_flags = PRUS_OOB; 1127 async = 0; 1128 } else if ((flags & MSG_EOF) && resid == 0) { 1129 pru_flags = PRUS_EOF; 1130 } else if (resid > 0 && space > 0) { 1131 /* If there is more to send, set PRUS_MORETOCOME */ 1132 pru_flags = PRUS_MORETOCOME; 1133 async = 1; 1134 } else { 1135 pru_flags = 0; 1136 } 1137 1138 if (flags & MSG_SYNC) 1139 async = 0; 1140 1141 /* 1142 * XXX all the SS_CANTSENDMORE checks previously 1143 * done could be out of date. We could have recieved 1144 * a reset packet in an interrupt or maybe we slept 1145 * while doing page faults in uiomove() etc. We could 1146 * probably recheck again inside the splnet() protection 1147 * here, but there are probably other places that this 1148 * also happens. We must rethink this. 
1149 */ 1150 for (m = top; m; m = m->m_next) 1151 ssb_preallocstream(&so->so_snd, m); 1152 if (!async) { 1153 error = so_pru_send(so, pru_flags, top, 1154 NULL, NULL, td); 1155 } else { 1156 so_pru_send_async(so, pru_flags, top, 1157 NULL, NULL, td); 1158 error = 0; 1159 } 1160 1161 top = NULL; 1162 mp = ⊤ 1163 if (error) 1164 goto release; 1165 } while (resid && space > 0); 1166 } while (resid); 1167 1168 release: 1169 ssb_unlock(&so->so_snd); 1170 out: 1171 if (top) 1172 m_freem(top); 1173 if (control) 1174 m_freem(control); 1175 return (error); 1176 } 1177 #endif 1178 1179 /* 1180 * Implement receive operations on a socket. 1181 * 1182 * We depend on the way that records are added to the signalsockbuf 1183 * by sbappend*. In particular, each record (mbufs linked through m_next) 1184 * must begin with an address if the protocol so specifies, 1185 * followed by an optional mbuf or mbufs containing ancillary data, 1186 * and then zero or more mbufs of data. 1187 * 1188 * Although the signalsockbuf is locked, new data may still be appended. 1189 * A token inside the ssb_lock deals with MP issues and still allows 1190 * the network to access the socket if we block in a uio. 1191 * 1192 * The caller may receive the data as a single mbuf chain by supplying 1193 * an mbuf **mp0 for use in returning the chain. The uio is then used 1194 * only for the count in uio_resid. 1195 */ 1196 int 1197 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1198 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1199 { 1200 struct mbuf *m, *n; 1201 struct mbuf *free_chain = NULL; 1202 int flags, len, error, offset; 1203 struct protosw *pr = so->so_proto; 1204 int moff, type = 0; 1205 size_t resid, orig_resid; 1206 1207 if (uio) 1208 resid = uio->uio_resid; 1209 else 1210 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1211 orig_resid = resid; 1212 1213 if (psa) 1214 *psa = NULL; 1215 if (controlp) 1216 *controlp = NULL; 1217 if (flagsp) 1218 flags = *flagsp &~ MSG_EOR; 1219 else 1220 flags = 0; 1221 if (flags & MSG_OOB) { 1222 m = m_get(MB_WAIT, MT_DATA); 1223 if (m == NULL) 1224 return (ENOBUFS); 1225 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1226 if (error) 1227 goto bad; 1228 if (sio) { 1229 do { 1230 sbappend(sio, m); 1231 KKASSERT(resid >= (size_t)m->m_len); 1232 resid -= (size_t)m->m_len; 1233 } while (resid > 0 && m); 1234 } else { 1235 do { 1236 uio->uio_resid = resid; 1237 error = uiomove(mtod(m, caddr_t), 1238 (int)szmin(resid, m->m_len), 1239 uio); 1240 resid = uio->uio_resid; 1241 m = m_free(m); 1242 } while (uio->uio_resid && error == 0 && m); 1243 } 1244 bad: 1245 if (m) 1246 m_freem(m); 1247 return (error); 1248 } 1249 if ((so->so_state & SS_ISCONFIRMING) && resid) 1250 so_pru_rcvd(so, 0); 1251 1252 /* 1253 * The token interlocks against the protocol thread while 1254 * ssb_lock is a blocking lock against other userland entities. 1255 */ 1256 lwkt_gettoken(&so->so_rcv.ssb_token); 1257 restart: 1258 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1259 if (error) 1260 goto done; 1261 1262 m = so->so_rcv.ssb_mb; 1263 /* 1264 * If we have less data than requested, block awaiting more 1265 * (subject to any timeout) if: 1266 * 1. the current count is less than the low water mark, or 1267 * 2. MSG_WAITALL is set, and it is possible to do the entire 1268 * receive operation at once if we block (resid <= hiwat). 1269 * 3. 
MSG_DONTWAIT is not set 1270 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1271 * we have to do the receive in sections, and thus risk returning 1272 * a short count if a timeout or signal occurs after we start. 1273 */ 1274 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1275 (size_t)so->so_rcv.ssb_cc < resid) && 1276 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1277 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1278 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1279 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1280 if (so->so_error) { 1281 if (m) 1282 goto dontblock; 1283 error = so->so_error; 1284 if ((flags & MSG_PEEK) == 0) 1285 so->so_error = 0; 1286 goto release; 1287 } 1288 if (so->so_state & SS_CANTRCVMORE) { 1289 if (m) 1290 goto dontblock; 1291 else 1292 goto release; 1293 } 1294 for (; m; m = m->m_next) { 1295 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1296 m = so->so_rcv.ssb_mb; 1297 goto dontblock; 1298 } 1299 } 1300 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1301 (pr->pr_flags & PR_CONNREQUIRED)) { 1302 error = ENOTCONN; 1303 goto release; 1304 } 1305 if (resid == 0) 1306 goto release; 1307 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1308 error = EWOULDBLOCK; 1309 goto release; 1310 } 1311 ssb_unlock(&so->so_rcv); 1312 error = ssb_wait(&so->so_rcv); 1313 if (error) 1314 goto done; 1315 goto restart; 1316 } 1317 dontblock: 1318 if (uio && uio->uio_td && uio->uio_td->td_proc) 1319 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1320 1321 /* 1322 * note: m should be == sb_mb here. Cache the next record while 1323 * cleaning up. Note that calling m_free*() will break out critical 1324 * section. 1325 */ 1326 KKASSERT(m == so->so_rcv.ssb_mb); 1327 1328 /* 1329 * Skip any address mbufs prepending the record. 1330 */ 1331 if (pr->pr_flags & PR_ADDR) { 1332 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1333 orig_resid = 0; 1334 if (psa) 1335 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1336 if (flags & MSG_PEEK) 1337 m = m->m_next; 1338 else 1339 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1340 } 1341 1342 /* 1343 * Skip any control mbufs prepending the record. 1344 */ 1345 #ifdef SCTP 1346 if (pr->pr_flags & PR_ADDR_OPT) { 1347 /* 1348 * For SCTP we may be getting a 1349 * whole message OR a partial delivery. 1350 */ 1351 if (m && m->m_type == MT_SONAME) { 1352 orig_resid = 0; 1353 if (psa) 1354 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1355 if (flags & MSG_PEEK) 1356 m = m->m_next; 1357 else 1358 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1359 } 1360 } 1361 #endif /* SCTP */ 1362 while (m && m->m_type == MT_CONTROL && error == 0) { 1363 if (flags & MSG_PEEK) { 1364 if (controlp) 1365 *controlp = m_copy(m, 0, m->m_len); 1366 m = m->m_next; /* XXX race */ 1367 } else { 1368 if (controlp) { 1369 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1370 if (pr->pr_domain->dom_externalize && 1371 mtod(m, struct cmsghdr *)->cmsg_type == 1372 SCM_RIGHTS) 1373 error = (*pr->pr_domain->dom_externalize)(m); 1374 *controlp = m; 1375 m = n; 1376 } else { 1377 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1378 } 1379 } 1380 if (controlp && *controlp) { 1381 orig_resid = 0; 1382 controlp = &(*controlp)->m_next; 1383 } 1384 } 1385 1386 /* 1387 * flag OOB data. 1388 */ 1389 if (m) { 1390 type = m->m_type; 1391 if (type == MT_OOBDATA) 1392 flags |= MSG_OOB; 1393 } 1394 1395 /* 1396 * Copy to the UIO or mbuf return chain (*mp). 
1397 */ 1398 moff = 0; 1399 offset = 0; 1400 while (m && resid > 0 && error == 0) { 1401 if (m->m_type == MT_OOBDATA) { 1402 if (type != MT_OOBDATA) 1403 break; 1404 } else if (type == MT_OOBDATA) 1405 break; 1406 else 1407 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1408 ("receive 3")); 1409 soclrstate(so, SS_RCVATMARK); 1410 len = (resid > INT_MAX) ? INT_MAX : resid; 1411 if (so->so_oobmark && len > so->so_oobmark - offset) 1412 len = so->so_oobmark - offset; 1413 if (len > m->m_len - moff) 1414 len = m->m_len - moff; 1415 1416 /* 1417 * Copy out to the UIO or pass the mbufs back to the SIO. 1418 * The SIO is dealt with when we eat the mbuf, but deal 1419 * with the resid here either way. 1420 */ 1421 if (uio) { 1422 uio->uio_resid = resid; 1423 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1424 resid = uio->uio_resid; 1425 if (error) 1426 goto release; 1427 } else { 1428 resid -= (size_t)len; 1429 } 1430 1431 /* 1432 * Eat the entire mbuf or just a piece of it 1433 */ 1434 if (len == m->m_len - moff) { 1435 if (m->m_flags & M_EOR) 1436 flags |= MSG_EOR; 1437 #ifdef SCTP 1438 if (m->m_flags & M_NOTIFICATION) 1439 flags |= MSG_NOTIFICATION; 1440 #endif /* SCTP */ 1441 if (flags & MSG_PEEK) { 1442 m = m->m_next; 1443 moff = 0; 1444 } else { 1445 if (sio) { 1446 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1447 sbappend(sio, m); 1448 m = n; 1449 } else { 1450 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1451 } 1452 } 1453 } else { 1454 if (flags & MSG_PEEK) { 1455 moff += len; 1456 } else { 1457 if (sio) { 1458 n = m_copym(m, 0, len, MB_WAIT); 1459 if (n) 1460 sbappend(sio, n); 1461 } 1462 m->m_data += len; 1463 m->m_len -= len; 1464 so->so_rcv.ssb_cc -= len; 1465 } 1466 } 1467 if (so->so_oobmark) { 1468 if ((flags & MSG_PEEK) == 0) { 1469 so->so_oobmark -= len; 1470 if (so->so_oobmark == 0) { 1471 sosetstate(so, SS_RCVATMARK); 1472 break; 1473 } 1474 } else { 1475 offset += len; 1476 if (offset == so->so_oobmark) 1477 break; 1478 } 1479 } 1480 if (flags & MSG_EOR) 1481 break; 1482 /* 1483 * If the MSG_WAITALL flag is set (for non-atomic socket), 1484 * we must not quit until resid == 0 or an error 1485 * termination. If a signal/timeout occurs, return 1486 * with a short count but without error. 1487 * Keep signalsockbuf locked against other readers. 1488 */ 1489 while ((flags & MSG_WAITALL) && m == NULL && 1490 resid > 0 && !sosendallatonce(so) && 1491 so->so_rcv.ssb_mb == NULL) { 1492 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1493 break; 1494 /* 1495 * The window might have closed to zero, make 1496 * sure we send an ack now that we've drained 1497 * the buffer or we might end up blocking until 1498 * the idle takes over (5 seconds). 1499 */ 1500 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1501 so_pru_rcvd(so, flags); 1502 error = ssb_wait(&so->so_rcv); 1503 if (error) { 1504 ssb_unlock(&so->so_rcv); 1505 error = 0; 1506 goto done; 1507 } 1508 m = so->so_rcv.ssb_mb; 1509 } 1510 } 1511 1512 /* 1513 * If an atomic read was requested but unread data still remains 1514 * in the record, set MSG_TRUNC. 1515 */ 1516 if (m && pr->pr_flags & PR_ATOMIC) 1517 flags |= MSG_TRUNC; 1518 1519 /* 1520 * Cleanup. If an atomic read was requested drop any unread data. 
1521 */ 1522 if ((flags & MSG_PEEK) == 0) { 1523 if (m && (pr->pr_flags & PR_ATOMIC)) 1524 sbdroprecord(&so->so_rcv.sb); 1525 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1526 so_pru_rcvd(so, flags); 1527 } 1528 1529 if (orig_resid == resid && orig_resid && 1530 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1531 ssb_unlock(&so->so_rcv); 1532 goto restart; 1533 } 1534 1535 if (flagsp) 1536 *flagsp |= flags; 1537 release: 1538 ssb_unlock(&so->so_rcv); 1539 done: 1540 lwkt_reltoken(&so->so_rcv.ssb_token); 1541 if (free_chain) 1542 m_freem(free_chain); 1543 return (error); 1544 } 1545 1546 int 1547 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio, 1548 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1549 { 1550 struct mbuf *m, *n; 1551 struct mbuf *free_chain = NULL; 1552 int flags, len, error, offset; 1553 struct protosw *pr = so->so_proto; 1554 int moff; 1555 size_t resid, orig_resid; 1556 1557 if (uio) 1558 resid = uio->uio_resid; 1559 else 1560 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1561 orig_resid = resid; 1562 1563 if (psa) 1564 *psa = NULL; 1565 if (controlp) 1566 *controlp = NULL; 1567 if (flagsp) 1568 flags = *flagsp &~ MSG_EOR; 1569 else 1570 flags = 0; 1571 if (flags & MSG_OOB) { 1572 m = m_get(MB_WAIT, MT_DATA); 1573 if (m == NULL) 1574 return (ENOBUFS); 1575 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1576 if (error) 1577 goto bad; 1578 if (sio) { 1579 do { 1580 sbappend(sio, m); 1581 KKASSERT(resid >= (size_t)m->m_len); 1582 resid -= (size_t)m->m_len; 1583 } while (resid > 0 && m); 1584 } else { 1585 do { 1586 uio->uio_resid = resid; 1587 error = uiomove(mtod(m, caddr_t), 1588 (int)szmin(resid, m->m_len), 1589 uio); 1590 resid = uio->uio_resid; 1591 m = m_free(m); 1592 } while (uio->uio_resid && error == 0 && m); 1593 } 1594 bad: 1595 if (m) 1596 m_freem(m); 1597 return (error); 1598 } 1599 1600 /* 1601 * The token interlocks against the protocol thread while 1602 * ssb_lock is a blocking lock against other userland entities. 1603 */ 1604 lwkt_gettoken(&so->so_rcv.ssb_token); 1605 restart: 1606 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1607 if (error) 1608 goto done; 1609 1610 m = so->so_rcv.ssb_mb; 1611 /* 1612 * If we have less data than requested, block awaiting more 1613 * (subject to any timeout) if: 1614 * 1. the current count is less than the low water mark, or 1615 * 2. MSG_WAITALL is set, and it is possible to do the entire 1616 * receive operation at once if we block (resid <= hiwat). 1617 * 3. MSG_DONTWAIT is not set 1618 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1619 * we have to do the receive in sections, and thus risk returning 1620 * a short count if a timeout or signal occurs after we start. 
1621 */ 1622 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1623 (size_t)so->so_rcv.ssb_cc < resid) && 1624 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1625 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1626 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1627 if (so->so_error) { 1628 if (m) 1629 goto dontblock; 1630 error = so->so_error; 1631 if ((flags & MSG_PEEK) == 0) 1632 so->so_error = 0; 1633 goto release; 1634 } 1635 if (so->so_state & SS_CANTRCVMORE) { 1636 if (m) 1637 goto dontblock; 1638 else 1639 goto release; 1640 } 1641 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1642 (pr->pr_flags & PR_CONNREQUIRED)) { 1643 error = ENOTCONN; 1644 goto release; 1645 } 1646 if (resid == 0) 1647 goto release; 1648 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1649 error = EWOULDBLOCK; 1650 goto release; 1651 } 1652 ssb_unlock(&so->so_rcv); 1653 error = ssb_wait(&so->so_rcv); 1654 if (error) 1655 goto done; 1656 goto restart; 1657 } 1658 dontblock: 1659 if (uio && uio->uio_td && uio->uio_td->td_proc) 1660 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1661 1662 /* 1663 * note: m should be == sb_mb here. Cache the next record while 1664 * cleaning up. Note that calling m_free*() will break out critical 1665 * section. 1666 */ 1667 KKASSERT(m == so->so_rcv.ssb_mb); 1668 1669 /* 1670 * Copy to the UIO or mbuf return chain (*mp). 1671 */ 1672 moff = 0; 1673 offset = 0; 1674 while (m && resid > 0 && error == 0) { 1675 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1676 ("receive 3")); 1677 1678 soclrstate(so, SS_RCVATMARK); 1679 len = (resid > INT_MAX) ? INT_MAX : resid; 1680 if (so->so_oobmark && len > so->so_oobmark - offset) 1681 len = so->so_oobmark - offset; 1682 if (len > m->m_len - moff) 1683 len = m->m_len - moff; 1684 1685 /* 1686 * Copy out to the UIO or pass the mbufs back to the SIO. 1687 * The SIO is dealt with when we eat the mbuf, but deal 1688 * with the resid here either way. 1689 */ 1690 if (uio) { 1691 uio->uio_resid = resid; 1692 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1693 resid = uio->uio_resid; 1694 if (error) 1695 goto release; 1696 } else { 1697 resid -= (size_t)len; 1698 } 1699 1700 /* 1701 * Eat the entire mbuf or just a piece of it 1702 */ 1703 if (len == m->m_len - moff) { 1704 if (flags & MSG_PEEK) { 1705 m = m->m_next; 1706 moff = 0; 1707 } else { 1708 if (sio) { 1709 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1710 sbappend(sio, m); 1711 m = n; 1712 } else { 1713 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1714 } 1715 } 1716 } else { 1717 if (flags & MSG_PEEK) { 1718 moff += len; 1719 } else { 1720 if (sio) { 1721 n = m_copym(m, 0, len, MB_WAIT); 1722 if (n) 1723 sbappend(sio, n); 1724 } 1725 m->m_data += len; 1726 m->m_len -= len; 1727 so->so_rcv.ssb_cc -= len; 1728 } 1729 } 1730 if (so->so_oobmark) { 1731 if ((flags & MSG_PEEK) == 0) { 1732 so->so_oobmark -= len; 1733 if (so->so_oobmark == 0) { 1734 sosetstate(so, SS_RCVATMARK); 1735 break; 1736 } 1737 } else { 1738 offset += len; 1739 if (offset == so->so_oobmark) 1740 break; 1741 } 1742 } 1743 /* 1744 * If the MSG_WAITALL flag is set (for non-atomic socket), 1745 * we must not quit until resid == 0 or an error 1746 * termination. If a signal/timeout occurs, return 1747 * with a short count but without error. 1748 * Keep signalsockbuf locked against other readers. 
1749 */ 1750 while ((flags & MSG_WAITALL) && m == NULL && 1751 resid > 0 && !sosendallatonce(so) && 1752 so->so_rcv.ssb_mb == NULL) { 1753 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1754 break; 1755 /* 1756 * The window might have closed to zero, make 1757 * sure we send an ack now that we've drained 1758 * the buffer or we might end up blocking until 1759 * the idle takes over (5 seconds). 1760 */ 1761 if (so->so_pcb) 1762 so_pru_rcvd_async(so); 1763 error = ssb_wait(&so->so_rcv); 1764 if (error) { 1765 ssb_unlock(&so->so_rcv); 1766 error = 0; 1767 goto done; 1768 } 1769 m = so->so_rcv.ssb_mb; 1770 } 1771 } 1772 1773 /* 1774 * Cleanup. If an atomic read was requested drop any unread data. 1775 */ 1776 if ((flags & MSG_PEEK) == 0) { 1777 if (so->so_pcb) 1778 so_pru_rcvd_async(so); 1779 } 1780 1781 if (orig_resid == resid && orig_resid && 1782 (so->so_state & SS_CANTRCVMORE) == 0) { 1783 ssb_unlock(&so->so_rcv); 1784 goto restart; 1785 } 1786 1787 if (flagsp) 1788 *flagsp |= flags; 1789 release: 1790 ssb_unlock(&so->so_rcv); 1791 done: 1792 lwkt_reltoken(&so->so_rcv.ssb_token); 1793 if (free_chain) 1794 m_freem(free_chain); 1795 return (error); 1796 } 1797 1798 /* 1799 * Shut a socket down. Note that we do not get a frontend lock as we 1800 * want to be able to shut the socket down even if another thread is 1801 * blocked in a read(), thus waking it up. 1802 */ 1803 int 1804 soshutdown(struct socket *so, int how) 1805 { 1806 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1807 return (EINVAL); 1808 1809 if (how != SHUT_WR) { 1810 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1811 sorflush(so); 1812 /*ssb_unlock(&so->so_rcv);*/ 1813 } 1814 if (how != SHUT_RD) 1815 return (so_pru_shutdown(so)); 1816 return (0); 1817 } 1818 1819 void 1820 sorflush(struct socket *so) 1821 { 1822 struct signalsockbuf *ssb = &so->so_rcv; 1823 struct protosw *pr = so->so_proto; 1824 struct signalsockbuf asb; 1825 1826 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1827 1828 lwkt_gettoken(&ssb->ssb_token); 1829 socantrcvmore(so); 1830 asb = *ssb; 1831 1832 /* 1833 * Can't just blow up the ssb structure here 1834 */ 1835 bzero(&ssb->sb, sizeof(ssb->sb)); 1836 ssb->ssb_timeo = 0; 1837 ssb->ssb_lowat = 0; 1838 ssb->ssb_hiwat = 0; 1839 ssb->ssb_mbmax = 0; 1840 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1841 1842 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1843 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1844 ssb_release(&asb, so); 1845 1846 lwkt_reltoken(&ssb->ssb_token); 1847 } 1848 1849 #ifdef INET 1850 static int 1851 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1852 { 1853 struct accept_filter_arg *afap = NULL; 1854 struct accept_filter *afp; 1855 struct so_accf *af = so->so_accf; 1856 int error = 0; 1857 1858 /* do not set/remove accept filters on non listen sockets */ 1859 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1860 error = EINVAL; 1861 goto out; 1862 } 1863 1864 /* removing the filter */ 1865 if (sopt == NULL) { 1866 if (af != NULL) { 1867 if (af->so_accept_filter != NULL && 1868 af->so_accept_filter->accf_destroy != NULL) { 1869 af->so_accept_filter->accf_destroy(so); 1870 } 1871 if (af->so_accept_filter_str != NULL) { 1872 kfree(af->so_accept_filter_str, M_ACCF); 1873 } 1874 kfree(af, M_ACCF); 1875 so->so_accf = NULL; 1876 } 1877 so->so_options &= ~SO_ACCEPTFILTER; 1878 return (0); 1879 } 1880 /* adding a filter */ 1881 /* must remove previous filter first */ 1882 if (af != NULL) { 1883 error = EINVAL; 1884 goto out; 1885 } 1886 /* don't put large 
objects on the kernel stack */ 1887 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 1888 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 1889 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 1890 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 1891 if (error) 1892 goto out; 1893 afp = accept_filt_get(afap->af_name); 1894 if (afp == NULL) { 1895 error = ENOENT; 1896 goto out; 1897 } 1898 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 1899 if (afp->accf_create != NULL) { 1900 if (afap->af_name[0] != '\0') { 1901 int len = strlen(afap->af_name) + 1; 1902 1903 af->so_accept_filter_str = kmalloc(len, M_ACCF, 1904 M_WAITOK); 1905 strcpy(af->so_accept_filter_str, afap->af_name); 1906 } 1907 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 1908 if (af->so_accept_filter_arg == NULL) { 1909 kfree(af->so_accept_filter_str, M_ACCF); 1910 kfree(af, M_ACCF); 1911 so->so_accf = NULL; 1912 error = EINVAL; 1913 goto out; 1914 } 1915 } 1916 af->so_accept_filter = afp; 1917 so->so_accf = af; 1918 so->so_options |= SO_ACCEPTFILTER; 1919 out: 1920 if (afap != NULL) 1921 kfree(afap, M_TEMP); 1922 return (error); 1923 } 1924 #endif /* INET */ 1925 1926 /* 1927 * Perhaps this routine, and sooptcopyout(), below, ought to come in 1928 * an additional variant to handle the case where the option value needs 1929 * to be some kind of integer, but not a specific size. 1930 * In addition to their use here, these functions are also called by the 1931 * protocol-level pr_ctloutput() routines. 1932 */ 1933 int 1934 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 1935 { 1936 return soopt_to_kbuf(sopt, buf, len, minlen); 1937 } 1938 1939 int 1940 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 1941 { 1942 size_t valsize; 1943 1944 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 1945 KKASSERT(kva_p(buf)); 1946 1947 /* 1948 * If the user gives us more than we wanted, we ignore it, 1949 * but if we don't get the minimum length the caller 1950 * wants, we return EINVAL. On success, sopt->sopt_valsize 1951 * is set to however much we actually retrieved. 
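 *
 * Hedged usage sketch (it mirrors the fixed-size integer options handled
 * by sosetopt() later in this file; "optval" is just a caller local):
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);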
1952 */ 1953 if ((valsize = sopt->sopt_valsize) < minlen) 1954 return EINVAL; 1955 if (valsize > len) 1956 sopt->sopt_valsize = valsize = len; 1957 1958 bcopy(sopt->sopt_val, buf, valsize); 1959 return 0; 1960 } 1961 1962 1963 int 1964 sosetopt(struct socket *so, struct sockopt *sopt) 1965 { 1966 int error, optval; 1967 struct linger l; 1968 struct timeval tv; 1969 u_long val; 1970 struct signalsockbuf *sotmp; 1971 1972 error = 0; 1973 sopt->sopt_dir = SOPT_SET; 1974 if (sopt->sopt_level != SOL_SOCKET) { 1975 if (so->so_proto && so->so_proto->pr_ctloutput) { 1976 return (so_pr_ctloutput(so, sopt)); 1977 } 1978 error = ENOPROTOOPT; 1979 } else { 1980 switch (sopt->sopt_name) { 1981 #ifdef INET 1982 case SO_ACCEPTFILTER: 1983 error = do_setopt_accept_filter(so, sopt); 1984 if (error) 1985 goto bad; 1986 break; 1987 #endif /* INET */ 1988 case SO_LINGER: 1989 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 1990 if (error) 1991 goto bad; 1992 1993 so->so_linger = l.l_linger; 1994 if (l.l_onoff) 1995 so->so_options |= SO_LINGER; 1996 else 1997 so->so_options &= ~SO_LINGER; 1998 break; 1999 2000 case SO_DEBUG: 2001 case SO_KEEPALIVE: 2002 case SO_DONTROUTE: 2003 case SO_USELOOPBACK: 2004 case SO_BROADCAST: 2005 case SO_REUSEADDR: 2006 case SO_REUSEPORT: 2007 case SO_OOBINLINE: 2008 case SO_TIMESTAMP: 2009 case SO_NOSIGPIPE: 2010 error = sooptcopyin(sopt, &optval, sizeof optval, 2011 sizeof optval); 2012 if (error) 2013 goto bad; 2014 if (optval) 2015 so->so_options |= sopt->sopt_name; 2016 else 2017 so->so_options &= ~sopt->sopt_name; 2018 break; 2019 2020 case SO_SNDBUF: 2021 case SO_RCVBUF: 2022 case SO_SNDLOWAT: 2023 case SO_RCVLOWAT: 2024 error = sooptcopyin(sopt, &optval, sizeof optval, 2025 sizeof optval); 2026 if (error) 2027 goto bad; 2028 2029 /* 2030 * Values < 1 make no sense for any of these 2031 * options, so disallow them. 2032 */ 2033 if (optval < 1) { 2034 error = EINVAL; 2035 goto bad; 2036 } 2037 2038 switch (sopt->sopt_name) { 2039 case SO_SNDBUF: 2040 case SO_RCVBUF: 2041 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ? 2042 &so->so_snd : &so->so_rcv, (u_long)optval, 2043 so, 2044 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) { 2045 error = ENOBUFS; 2046 goto bad; 2047 } 2048 sotmp = (sopt->sopt_name == SO_SNDBUF) ? 2049 &so->so_snd : &so->so_rcv; 2050 atomic_clear_int(&sotmp->ssb_flags, 2051 SSB_AUTOSIZE); 2052 break; 2053 2054 /* 2055 * Make sure the low-water is never greater than 2056 * the high-water. 2057 */ 2058 case SO_SNDLOWAT: 2059 so->so_snd.ssb_lowat = 2060 (optval > so->so_snd.ssb_hiwat) ? 2061 so->so_snd.ssb_hiwat : optval; 2062 atomic_clear_int(&so->so_snd.ssb_flags, 2063 SSB_AUTOLOWAT); 2064 break; 2065 case SO_RCVLOWAT: 2066 so->so_rcv.ssb_lowat = 2067 (optval > so->so_rcv.ssb_hiwat) ? 
2068 so->so_rcv.ssb_hiwat : optval; 2069 atomic_clear_int(&so->so_rcv.ssb_flags, 2070 SSB_AUTOLOWAT); 2071 break; 2072 } 2073 break; 2074 2075 case SO_SNDTIMEO: 2076 case SO_RCVTIMEO: 2077 error = sooptcopyin(sopt, &tv, sizeof tv, 2078 sizeof tv); 2079 if (error) 2080 goto bad; 2081 2082 /* assert(hz > 0); */ 2083 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2084 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2085 error = EDOM; 2086 goto bad; 2087 } 2088 /* assert(tick > 0); */ 2089 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2090 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick; 2091 if (val > INT_MAX) { 2092 error = EDOM; 2093 goto bad; 2094 } 2095 if (val == 0 && tv.tv_usec != 0) 2096 val = 1; 2097 2098 switch (sopt->sopt_name) { 2099 case SO_SNDTIMEO: 2100 so->so_snd.ssb_timeo = val; 2101 break; 2102 case SO_RCVTIMEO: 2103 so->so_rcv.ssb_timeo = val; 2104 break; 2105 } 2106 break; 2107 default: 2108 error = ENOPROTOOPT; 2109 break; 2110 } 2111 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { 2112 (void) so_pr_ctloutput(so, sopt); 2113 } 2114 } 2115 bad: 2116 return (error); 2117 } 2118 2119 /* Helper routine for getsockopt */ 2120 int 2121 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2122 { 2123 soopt_from_kbuf(sopt, buf, len); 2124 return 0; 2125 } 2126 2127 void 2128 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len) 2129 { 2130 size_t valsize; 2131 2132 if (len == 0) { 2133 sopt->sopt_valsize = 0; 2134 return; 2135 } 2136 2137 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2138 KKASSERT(kva_p(buf)); 2139 2140 /* 2141 * Documented get behavior is that we always return a value, 2142 * possibly truncated to fit in the user's buffer. 2143 * Traditional behavior is that we always tell the user 2144 * precisely how much we copied, rather than something useful 2145 * like the total amount we had available for her. 2146 * Note that this interface is not idempotent; the entire answer must 2147 * be generated ahead of time.
2148 */ 2149 valsize = szmin(len, sopt->sopt_valsize); 2150 sopt->sopt_valsize = valsize; 2151 if (sopt->sopt_val != 0) { 2152 bcopy(buf, sopt->sopt_val, valsize); 2153 } 2154 } 2155 2156 int 2157 sogetopt(struct socket *so, struct sockopt *sopt) 2158 { 2159 int error, optval; 2160 long optval_l; 2161 struct linger l; 2162 struct timeval tv; 2163 #ifdef INET 2164 struct accept_filter_arg *afap; 2165 #endif 2166 2167 error = 0; 2168 sopt->sopt_dir = SOPT_GET; 2169 if (sopt->sopt_level != SOL_SOCKET) { 2170 if (so->so_proto && so->so_proto->pr_ctloutput) { 2171 return (so_pr_ctloutput(so, sopt)); 2172 } else 2173 return (ENOPROTOOPT); 2174 } else { 2175 switch (sopt->sopt_name) { 2176 #ifdef INET 2177 case SO_ACCEPTFILTER: 2178 if ((so->so_options & SO_ACCEPTCONN) == 0) 2179 return (EINVAL); 2180 afap = kmalloc(sizeof(*afap), M_TEMP, 2181 M_WAITOK | M_ZERO); 2182 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 2183 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 2184 if (so->so_accf->so_accept_filter_str != NULL) 2185 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 2186 } 2187 error = sooptcopyout(sopt, afap, sizeof(*afap)); 2188 kfree(afap, M_TEMP); 2189 break; 2190 #endif /* INET */ 2191 2192 case SO_LINGER: 2193 l.l_onoff = so->so_options & SO_LINGER; 2194 l.l_linger = so->so_linger; 2195 error = sooptcopyout(sopt, &l, sizeof l); 2196 break; 2197 2198 case SO_USELOOPBACK: 2199 case SO_DONTROUTE: 2200 case SO_DEBUG: 2201 case SO_KEEPALIVE: 2202 case SO_REUSEADDR: 2203 case SO_REUSEPORT: 2204 case SO_BROADCAST: 2205 case SO_OOBINLINE: 2206 case SO_TIMESTAMP: 2207 case SO_NOSIGPIPE: 2208 optval = so->so_options & sopt->sopt_name; 2209 integer: 2210 error = sooptcopyout(sopt, &optval, sizeof optval); 2211 break; 2212 2213 case SO_TYPE: 2214 optval = so->so_type; 2215 goto integer; 2216 2217 case SO_ERROR: 2218 optval = so->so_error; 2219 so->so_error = 0; 2220 goto integer; 2221 2222 case SO_SNDBUF: 2223 optval = so->so_snd.ssb_hiwat; 2224 goto integer; 2225 2226 case SO_RCVBUF: 2227 optval = so->so_rcv.ssb_hiwat; 2228 goto integer; 2229 2230 case SO_SNDLOWAT: 2231 optval = so->so_snd.ssb_lowat; 2232 goto integer; 2233 2234 case SO_RCVLOWAT: 2235 optval = so->so_rcv.ssb_lowat; 2236 goto integer; 2237 2238 case SO_SNDTIMEO: 2239 case SO_RCVTIMEO: 2240 optval = (sopt->sopt_name == SO_SNDTIMEO ? 2241 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo); 2242 2243 tv.tv_sec = optval / hz; 2244 tv.tv_usec = (optval % hz) * ustick; 2245 error = sooptcopyout(sopt, &tv, sizeof tv); 2246 break; 2247 2248 case SO_SNDSPACE: 2249 optval_l = ssb_space(&so->so_snd); 2250 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l)); 2251 break; 2252 2253 default: 2254 error = ENOPROTOOPT; 2255 break; 2256 } 2257 return (error); 2258 } 2259 } 2260 2261 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2262 int 2263 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2264 { 2265 struct mbuf *m, *m_prev; 2266 int sopt_size = sopt->sopt_valsize, msize; 2267 2268 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA, 2269 0, &msize); 2270 if (m == NULL) 2271 return (ENOBUFS); 2272 m->m_len = min(msize, sopt_size); 2273 sopt_size -= m->m_len; 2274 *mp = m; 2275 m_prev = m; 2276 2277 while (sopt_size > 0) { 2278 m = m_getl(sopt_size, sopt->sopt_td ? 
MB_WAIT : MB_DONTWAIT, 2279 MT_DATA, 0, &msize); 2280 if (m == NULL) { 2281 m_freem(*mp); 2282 return (ENOBUFS); 2283 } 2284 m->m_len = min(msize, sopt_size); 2285 sopt_size -= m->m_len; 2286 m_prev->m_next = m; 2287 m_prev = m; 2288 } 2289 return (0); 2290 } 2291 2292 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */ 2293 int 2294 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2295 { 2296 soopt_to_mbuf(sopt, m); 2297 return 0; 2298 } 2299 2300 void 2301 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m) 2302 { 2303 size_t valsize; 2304 void *val; 2305 2306 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2307 KKASSERT(kva_p(m)); 2308 if (sopt->sopt_val == NULL) 2309 return; 2310 val = sopt->sopt_val; 2311 valsize = sopt->sopt_valsize; 2312 while (m != NULL && valsize >= m->m_len) { 2313 bcopy(val, mtod(m, char *), m->m_len); 2314 valsize -= m->m_len; 2315 val = (caddr_t)val + m->m_len; 2316 m = m->m_next; 2317 } 2318 if (m != NULL) /* soopt_getm() should have allocated enough mbufs */ 2319 panic("ip6_sooptmcopyin"); 2320 } 2321 2322 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2323 int 2324 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2325 { 2326 return soopt_from_mbuf(sopt, m); 2327 } 2328 2329 int 2330 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m) 2331 { 2332 struct mbuf *m0 = m; 2333 size_t valsize = 0; 2334 size_t maxsize; 2335 void *val; 2336 2337 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2338 KKASSERT(kva_p(m)); 2339 if (sopt->sopt_val == NULL) 2340 return 0; 2341 val = sopt->sopt_val; 2342 maxsize = sopt->sopt_valsize; 2343 while (m != NULL && maxsize >= m->m_len) { 2344 bcopy(mtod(m, char *), val, m->m_len); 2345 maxsize -= m->m_len; 2346 val = (caddr_t)val + m->m_len; 2347 valsize += m->m_len; 2348 m = m->m_next; 2349 } 2350 if (m != NULL) { 2351 /* the user-land buffer should have been large enough */ 2352 m_freem(m0); 2353 return (EINVAL); 2354 } 2355 sopt->sopt_valsize = valsize; 2356 return 0; 2357 } 2358 2359 void 2360 sohasoutofband(struct socket *so) 2361 { 2362 if (so->so_sigio != NULL) 2363 pgsigio(so->so_sigio, SIGURG, 0); 2364 KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB); 2365 } 2366 2367 int 2368 sokqfilter(struct file *fp, struct knote *kn) 2369 { 2370 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2371 struct signalsockbuf *ssb; 2372 2373 switch (kn->kn_filter) { 2374 case EVFILT_READ: 2375 if (so->so_options & SO_ACCEPTCONN) 2376 kn->kn_fop = &solisten_filtops; 2377 else 2378 kn->kn_fop = &soread_filtops; 2379 ssb = &so->so_rcv; 2380 break; 2381 case EVFILT_WRITE: 2382 kn->kn_fop = &sowrite_filtops; 2383 ssb = &so->so_snd; 2384 break; 2385 case EVFILT_EXCEPT: 2386 kn->kn_fop = &soexcept_filtops; 2387 ssb = &so->so_rcv; 2388 break; 2389 default: 2390 return (EOPNOTSUPP); 2391 } 2392 2393 knote_insert(&ssb->ssb_kq.ki_note, kn); 2394 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE); 2395 return (0); 2396 } 2397 2398 static void 2399 filt_sordetach(struct knote *kn) 2400 { 2401 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2402 2403 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn); 2404 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note)) 2405 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE); 2406 } 2407 2408 /*ARGSUSED*/ 2409 static int 2410 filt_soread(struct knote *kn, long hint) 2411 { 2412 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2413 2414 if (kn->kn_sfflags & NOTE_OOB) { 2415 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) { 2416 kn->kn_fflags
|= NOTE_OOB; 2417 return (1); 2418 } 2419 return (0); 2420 } 2421 kn->kn_data = so->so_rcv.ssb_cc; 2422 2423 if (so->so_state & SS_CANTRCVMORE) { 2424 /* 2425 * Only set NODATA if all data has been exhausted. 2426 */ 2427 if (kn->kn_data == 0) 2428 kn->kn_flags |= EV_NODATA; 2429 kn->kn_flags |= EV_EOF; 2430 kn->kn_fflags = so->so_error; 2431 return (1); 2432 } 2433 if (so->so_error) /* temporary udp error */ 2434 return (1); 2435 if (kn->kn_sfflags & NOTE_LOWAT) 2436 return (kn->kn_data >= kn->kn_sdata); 2437 return ((kn->kn_data >= so->so_rcv.ssb_lowat) || 2438 !TAILQ_EMPTY(&so->so_comp)); 2439 } 2440 2441 static void 2442 filt_sowdetach(struct knote *kn) 2443 { 2444 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2445 2446 knote_remove(&so->so_snd.ssb_kq.ki_note, kn); 2447 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note)) 2448 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE); 2449 } 2450 2451 /*ARGSUSED*/ 2452 static int 2453 filt_sowrite(struct knote *kn, long hint) 2454 { 2455 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2456 2457 kn->kn_data = ssb_space(&so->so_snd); 2458 if (so->so_state & SS_CANTSENDMORE) { 2459 kn->kn_flags |= (EV_EOF | EV_NODATA); 2460 kn->kn_fflags = so->so_error; 2461 return (1); 2462 } 2463 if (so->so_error) /* temporary udp error */ 2464 return (1); 2465 if (((so->so_state & SS_ISCONNECTED) == 0) && 2466 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2467 return (0); 2468 if (kn->kn_sfflags & NOTE_LOWAT) 2469 return (kn->kn_data >= kn->kn_sdata); 2470 return (kn->kn_data >= so->so_snd.ssb_lowat); 2471 } 2472 2473 /*ARGSUSED*/ 2474 static int 2475 filt_solisten(struct knote *kn, long hint) 2476 { 2477 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2478 2479 kn->kn_data = so->so_qlen; 2480 return (! TAILQ_EMPTY(&so->so_comp)); 2481 } 2482
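/*
 * A minimal user-land sketch of how the sosetopt()/sogetopt() paths above
 * are reached through setsockopt(2)/getsockopt(2).  SO_LINGER exercises the
 * struct linger branch, SO_SNDBUF goes through ssb_reserve() (values < 1
 * fail with EINVAL), SO_RCVTIMEO is converted to ticks (out-of-range
 * timevals fail with EDOM), and the final getsockopt() returns what
 * sooptcopyout() reports.  The option values chosen here are illustrative
 * only; error handling is reduced to err(3) for brevity.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct linger l = { .l_onoff = 1, .l_linger = 5 };
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
	int sndbuf = 65536, optval;
	socklen_t len;
	int s;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s < 0)
		err(1, "socket");

	/* Lands in the SO_LINGER case of sosetopt(). */
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
		err(1, "SO_LINGER");

	/* Resizes the send buffer via ssb_reserve(). */
	if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf)) < 0)
		err(1, "SO_SNDBUF");

	/* Converted to ticks by the SO_SNDTIMEO/SO_RCVTIMEO branch. */
	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
		err(1, "SO_RCVTIMEO");

	/* Read the send buffer size back through sogetopt(). */
	len = sizeof(optval);
	if (getsockopt(s, SOL_SOCKET, SO_SNDBUF, &optval, &len) < 0)
		err(1, "getsockopt");
	printf("SO_SNDBUF is now %d\n", optval);

	close(s);
	return (0);
}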
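/*
 * A user-land sketch of installing an accept filter, which ends up in
 * do_setopt_accept_filter() above.  The sketch assumes the "dataready"
 * filter (accf_data) has been registered; if it has not, accept_filt_get()
 * fails and the setsockopt() call returns ENOENT.  The function name and
 * backlog value are illustrative only.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <err.h>

int
listen_with_dataready(int port)
{
	struct sockaddr_in sin;
	struct accept_filter_arg afa;
	int s;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s < 0)
		err(1, "socket");

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		err(1, "bind");

	/* The socket must already be listening before the filter is set. */
	if (listen(s, 128) < 0)
		err(1, "listen");

	/*
	 * af_name selects the registered filter; af_arg is handed to the
	 * filter's accf_create() callback.
	 */
	memset(&afa, 0, sizeof(afa));
	strcpy(afa.af_name, "dataready");
	if (setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa)) < 0)
		warn("SO_ACCEPTFILTER (is accf_data available?)");

	return (s);
}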
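/*
 * A user-land sketch of the kqueue filters registered by sokqfilter()
 * above.  An EVFILT_READ knote on a listening socket is serviced by
 * filt_solisten() (its data field reports the completed-connection queue
 * length), while NOTE_LOWAT on a connected socket makes filt_soread()
 * compare the buffered byte count against the per-knote threshold instead
 * of ssb_lowat.  The function name and threshold are illustrative only.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <err.h>
#include <stddef.h>

int
wait_readable(int lsock, int dsock, int lowat)
{
	struct kevent kev[2], out;
	int kq, n;

	kq = kqueue();
	if (kq < 0)
		err(1, "kqueue");

	/* Listening socket: fires when so_comp is non-empty. */
	EV_SET(&kev[0], lsock, EVFILT_READ, EV_ADD, 0, 0, NULL);
	/* Connected socket: fires once at least 'lowat' bytes are buffered. */
	EV_SET(&kev[1], dsock, EVFILT_READ, EV_ADD, NOTE_LOWAT, lowat, NULL);

	n = kevent(kq, kev, 2, &out, 1, NULL);
	if (n < 0)
		err(1, "kevent");
	return ((int)out.ident);
}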
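/*
 * The SO_SNDTIMEO/SO_RCVTIMEO conversion performed in sosetopt(), restated
 * as a stand-alone sketch: seconds are scaled by hz, microseconds by the
 * tick length, out-of-range requests yield EDOM, and a non-zero timeout
 * shorter than one tick is rounded up to a single tick.  The hz and ustick
 * values below are assumptions for illustration; the kernel uses its own
 * clock parameters.
 */
#include <limits.h>
#include <sys/time.h>

static int
tv_to_ticks(const struct timeval *tv, unsigned long *ticksp)
{
	const int hz = 100;			/* assumed ticks per second */
	const int ustick = 1000000 / hz;	/* microseconds per tick */
	unsigned long val;

	if (tv->tv_sec < 0 || tv->tv_sec > INT_MAX / hz ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (-1);			/* kernel path returns EDOM */
	val = (unsigned long)(tv->tv_sec * hz) + tv->tv_usec / ustick;
	if (val > INT_MAX)
		return (-1);
	if (val == 0 && tv->tv_usec != 0)
		val = 1;
	*ticksp = val;
	return (0);
}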
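/*
 * A user-land analogue of the copy loop in soopt_from_mbuf(): whole
 * segments are copied while they still fit, and a leftover segment means
 * the destination buffer was too small.  struct seg and chain_to_buf() are
 * hypothetical names standing in for mbufs, used for illustration only.
 */
#include <stddef.h>
#include <string.h>

struct seg {
	struct seg	*next;
	size_t		 len;
	char		 data[128];
};

static int
chain_to_buf(const struct seg *s, void *buf, size_t bufsize, size_t *copied)
{
	char *dst = buf;
	size_t left = bufsize, total = 0;

	while (s != NULL && left >= s->len) {
		memcpy(dst, s->data, s->len);
		dst += s->len;
		left -= s->len;
		total += s->len;
		s = s->next;
	}
	if (s != NULL)
		return (-1);	/* kernel path frees the chain and returns EINVAL */
	*copied = total;
	return (0);
}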