1 /* 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by the University of 49 * California, Berkeley and its contributors. 50 * 4. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 67 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $ 68 */ 69 70 #include "opt_inet.h" 71 #include "opt_sctp.h" 72 73 #include <sys/param.h> 74 #include <sys/systm.h> 75 #include <sys/fcntl.h> 76 #include <sys/malloc.h> 77 #include <sys/mbuf.h> 78 #include <sys/domain.h> 79 #include <sys/file.h> /* for struct knote */ 80 #include <sys/kernel.h> 81 #include <sys/event.h> 82 #include <sys/proc.h> 83 #include <sys/protosw.h> 84 #include <sys/socket.h> 85 #include <sys/socketvar.h> 86 #include <sys/socketops.h> 87 #include <sys/resourcevar.h> 88 #include <sys/signalvar.h> 89 #include <sys/sysctl.h> 90 #include <sys/uio.h> 91 #include <sys/jail.h> 92 #include <vm/vm_zone.h> 93 #include <vm/pmap.h> 94 #include <net/netmsg2.h> 95 96 #include <sys/thread2.h> 97 #include <sys/socketvar2.h> 98 #include <sys/spinlock2.h> 99 100 #include <machine/limits.h> 101 102 extern int tcp_sosend_agglim; 103 extern int tcp_sosend_async; 104 extern int udp_sosend_async; 105 106 #ifdef INET 107 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); 108 #endif /* INET */ 109 110 static void filt_sordetach(struct knote *kn); 111 static int filt_soread(struct knote *kn, long hint); 112 static void filt_sowdetach(struct knote *kn); 113 static int filt_sowrite(struct knote *kn, long hint); 114 static int filt_solisten(struct knote *kn, long hint); 115 116 static void sodiscard(struct socket *so); 117 static int soclose_sync(struct socket *so, int fflag); 118 static void soclose_fast(struct socket *so); 119 120 static struct filterops solisten_filtops = 121 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten }; 122 static struct filterops soread_filtops = 123 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 124 static struct filterops sowrite_filtops = 125 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite }; 126 static struct filterops soexcept_filtops = 127 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 128 129 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct"); 130 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 131 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); 132 133 134 static int somaxconn = SOMAXCONN; 135 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, 136 &somaxconn, 0, "Maximum pending socket connection queue size"); 137 138 static int use_soclose_fast = 1; 139 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW, 140 &use_soclose_fast, 0, "Fast socket close"); 141 142 int use_soaccept_pred_fast = 1; 143 SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW, 144 &use_soaccept_pred_fast, 0, "Fast socket accept predication"); 145 146 int use_sendfile_async = 1; 147 SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW, 148 &use_sendfile_async, 0, "sendfile uses asynchronized pru_send"); 149 150 /* 151 * Socket operation routines. 
152 * These routines are called by the routines in 153 * sys_socket.c or from a system process, and 154 * implement the semantics of socket operations by 155 * switching out to the protocol specific routines. 156 */ 157 158 /* 159 * Get a socket structure, and initialize it. 160 * Note that it would probably be better to allocate socket 161 * and PCB at the same time, but I'm not convinced that all 162 * the protocols can be easily modified to do this. 163 */ 164 struct socket * 165 soalloc(int waitok, struct protosw *pr) 166 { 167 struct socket *so; 168 unsigned waitmask; 169 170 waitmask = waitok ? M_WAITOK : M_NOWAIT; 171 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 172 if (so) { 173 /* XXX race condition for reentrant kernel */ 174 so->so_proto = pr; 175 TAILQ_INIT(&so->so_aiojobq); 176 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist); 177 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist); 178 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 179 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 180 spin_init(&so->so_rcvd_spin); 181 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 182 MSGF_DROPABLE, so->so_proto->pr_usrreqs->pru_rcvd); 183 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 184 so->so_state = SS_NOFDREF; 185 so->so_refs = 1; 186 } 187 return so; 188 } 189 190 int 191 socreate(int dom, struct socket **aso, int type, 192 int proto, struct thread *td) 193 { 194 struct proc *p = td->td_proc; 195 struct protosw *prp; 196 struct socket *so; 197 struct pru_attach_info ai; 198 int error; 199 200 if (proto) 201 prp = pffindproto(dom, proto, type); 202 else 203 prp = pffindtype(dom, type); 204 205 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 206 return (EPROTONOSUPPORT); 207 208 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 209 prp->pr_domain->dom_family != PF_LOCAL && 210 prp->pr_domain->dom_family != PF_INET && 211 prp->pr_domain->dom_family != PF_INET6 && 212 prp->pr_domain->dom_family != PF_ROUTE) { 213 return (EPROTONOSUPPORT); 214 } 215 216 if (prp->pr_type != type) 217 return (EPROTOTYPE); 218 so = soalloc(p != NULL, prp); 219 if (so == NULL) 220 return (ENOBUFS); 221 222 /* 223 * Callers of socreate() presumably will connect up a descriptor 224 * and call soclose() if they cannot. This represents our so_refs 225 * (which should be 1) from soalloc(). 226 */ 227 soclrstate(so, SS_NOFDREF); 228 229 /* 230 * Set a default port for protocol processing. No action will occur 231 * on the socket on this port until an inpcb is attached to it and 232 * is able to match incoming packets, or until the socket becomes 233 * available to userland. 234 * 235 * We normally default the socket to the protocol thread on cpu 0. 236 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 237 * thread and all pr_*()/pru_*() calls are executed synchronously. 238 */ 239 if (prp->pr_flags & PR_SYNC_PORT) 240 so->so_port = &netisr_sync_port; 241 else 242 so->so_port = netisr_portfn(0); 243 244 TAILQ_INIT(&so->so_incomp); 245 TAILQ_INIT(&so->so_comp); 246 so->so_type = type; 247 so->so_cred = crhold(p->p_ucred); 248 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 249 ai.p_ucred = p->p_ucred; 250 ai.fd_rdir = p->p_fd->fd_rdir; 251 252 /* 253 * Auto-sizing of socket buffers is managed by the protocols and 254 * the appropriate flags must be set in the pru_attach function. 
255 */ 256 error = so_pru_attach(so, proto, &ai); 257 if (error) { 258 sosetstate(so, SS_NOFDREF); 259 sofree(so); /* from soalloc */ 260 return error; 261 } 262 263 /* 264 * NOTE: Returns referenced socket. 265 */ 266 *aso = so; 267 return (0); 268 } 269 270 int 271 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 272 { 273 int error; 274 275 error = so_pru_bind(so, nam, td); 276 return (error); 277 } 278 279 static void 280 sodealloc(struct socket *so) 281 { 282 if (so->so_rcv.ssb_hiwat) 283 (void)chgsbsize(so->so_cred->cr_uidinfo, 284 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 285 if (so->so_snd.ssb_hiwat) 286 (void)chgsbsize(so->so_cred->cr_uidinfo, 287 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 288 #ifdef INET 289 /* remove accept filter if present */ 290 if (so->so_accf != NULL) 291 do_setopt_accept_filter(so, NULL); 292 #endif /* INET */ 293 crfree(so->so_cred); 294 if (so->so_faddr != NULL) 295 kfree(so->so_faddr, M_SONAME); 296 kfree(so, M_SOCKET); 297 } 298 299 int 300 solisten(struct socket *so, int backlog, struct thread *td) 301 { 302 int error; 303 #ifdef SCTP 304 short oldopt, oldqlimit; 305 #endif /* SCTP */ 306 307 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 308 return (EINVAL); 309 310 #ifdef SCTP 311 oldopt = so->so_options; 312 oldqlimit = so->so_qlimit; 313 #endif /* SCTP */ 314 315 lwkt_gettoken(&so->so_rcv.ssb_token); 316 if (TAILQ_EMPTY(&so->so_comp)) 317 so->so_options |= SO_ACCEPTCONN; 318 lwkt_reltoken(&so->so_rcv.ssb_token); 319 if (backlog < 0 || backlog > somaxconn) 320 backlog = somaxconn; 321 so->so_qlimit = backlog; 322 /* SCTP needs to look at tweak both the inbound backlog parameter AND 323 * the so_options (UDP model both connect's and gets inbound 324 * connections .. implicitly). 325 */ 326 error = so_pru_listen(so, td); 327 if (error) { 328 #ifdef SCTP 329 /* Restore the params */ 330 so->so_options = oldopt; 331 so->so_qlimit = oldqlimit; 332 #endif /* SCTP */ 333 return (error); 334 } 335 return (0); 336 } 337 338 /* 339 * Destroy a disconnected socket. This routine is a NOP if entities 340 * still have a reference on the socket: 341 * 342 * so_pcb - The protocol stack still has a reference 343 * SS_NOFDREF - There is no longer a file pointer reference 344 */ 345 void 346 sofree(struct socket *so) 347 { 348 struct socket *head; 349 350 /* 351 * This is a bit hackish at the moment. We need to interlock 352 * any accept queue we are on before we potentially lose the 353 * last reference to avoid races against a re-reference from 354 * someone operating on the queue. 355 */ 356 while ((head = so->so_head) != NULL) { 357 lwkt_getpooltoken(head); 358 if (so->so_head == head) 359 break; 360 lwkt_relpooltoken(head); 361 } 362 363 /* 364 * Arbitrage the last free. 365 */ 366 KKASSERT(so->so_refs > 0); 367 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 368 if (head) 369 lwkt_relpooltoken(head); 370 return; 371 } 372 373 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 374 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 375 376 /* 377 * We're done, remove ourselves from the accept queue we are 378 * on, if we are on one. 379 */ 380 if (head != NULL) { 381 if (so->so_state & SS_INCOMP) { 382 TAILQ_REMOVE(&head->so_incomp, so, so_list); 383 head->so_incqlen--; 384 } else if (so->so_state & SS_COMP) { 385 /* 386 * We must not decommission a socket that's 387 * on the accept(2) queue. If we do, then 388 * accept(2) may hang after select(2) indicated 389 * that the listening socket was ready. 
390 */ 391 lwkt_relpooltoken(head); 392 return; 393 } else { 394 panic("sofree: not queued"); 395 } 396 soclrstate(so, SS_INCOMP); 397 so->so_head = NULL; 398 lwkt_relpooltoken(head); 399 } 400 ssb_release(&so->so_snd, so); 401 sorflush(so); 402 sodealloc(so); 403 } 404 405 /* 406 * Close a socket on last file table reference removal. 407 * Initiate disconnect if connected. 408 * Free socket when disconnect complete. 409 */ 410 int 411 soclose(struct socket *so, int fflag) 412 { 413 int error; 414 415 funsetown(&so->so_sigio); 416 if (!use_soclose_fast || 417 (so->so_proto->pr_flags & PR_SYNC_PORT) || 418 (so->so_options & SO_LINGER)) { 419 error = soclose_sync(so, fflag); 420 } else { 421 soclose_fast(so); 422 error = 0; 423 } 424 return error; 425 } 426 427 static void 428 sodiscard(struct socket *so) 429 { 430 lwkt_getpooltoken(so); 431 if (so->so_options & SO_ACCEPTCONN) { 432 struct socket *sp; 433 434 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 435 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 436 soclrstate(sp, SS_INCOMP); 437 sp->so_head = NULL; 438 so->so_incqlen--; 439 soaborta(sp); 440 } 441 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 442 TAILQ_REMOVE(&so->so_comp, sp, so_list); 443 soclrstate(sp, SS_COMP); 444 sp->so_head = NULL; 445 so->so_qlen--; 446 soaborta(sp); 447 } 448 } 449 lwkt_relpooltoken(so); 450 451 if (so->so_state & SS_NOFDREF) 452 panic("soclose: NOFDREF"); 453 sosetstate(so, SS_NOFDREF); /* take ref */ 454 } 455 456 static int 457 soclose_sync(struct socket *so, int fflag) 458 { 459 int error = 0; 460 461 if (so->so_pcb == NULL) 462 goto discard; 463 if (so->so_state & SS_ISCONNECTED) { 464 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 465 error = sodisconnect(so); 466 if (error) 467 goto drop; 468 } 469 if (so->so_options & SO_LINGER) { 470 if ((so->so_state & SS_ISDISCONNECTING) && 471 (fflag & FNONBLOCK)) 472 goto drop; 473 while (so->so_state & SS_ISCONNECTED) { 474 error = tsleep(&so->so_timeo, PCATCH, 475 "soclos", so->so_linger * hz); 476 if (error) 477 break; 478 } 479 } 480 } 481 drop: 482 if (so->so_pcb) { 483 int error2; 484 485 error2 = so_pru_detach(so); 486 if (error == 0) 487 error = error2; 488 } 489 discard: 490 sodiscard(so); 491 so_pru_sync(so); /* unpend async sending */ 492 sofree(so); /* dispose of ref */ 493 494 return (error); 495 } 496 497 static void 498 soclose_sofree_async_handler(netmsg_t msg) 499 { 500 sofree(msg->base.nm_so); 501 } 502 503 static void 504 soclose_sofree_async(struct socket *so) 505 { 506 struct netmsg_base *base = &so->so_clomsg; 507 508 netmsg_init(base, so, &netisr_apanic_rport, 0, 509 soclose_sofree_async_handler); 510 lwkt_sendmsg(so->so_port, &base->lmsg); 511 } 512 513 static void 514 soclose_disconn_async_handler(netmsg_t msg) 515 { 516 struct socket *so = msg->base.nm_so; 517 518 if ((so->so_state & SS_ISCONNECTED) && 519 (so->so_state & SS_ISDISCONNECTING) == 0) 520 so_pru_disconnect_direct(so); 521 522 if (so->so_pcb) 523 so_pru_detach_direct(so); 524 525 sodiscard(so); 526 sofree(so); 527 } 528 529 static void 530 soclose_disconn_async(struct socket *so) 531 { 532 struct netmsg_base *base = &so->so_clomsg; 533 534 netmsg_init(base, so, &netisr_apanic_rport, 0, 535 soclose_disconn_async_handler); 536 lwkt_sendmsg(so->so_port, &base->lmsg); 537 } 538 539 static void 540 soclose_detach_async_handler(netmsg_t msg) 541 { 542 struct socket *so = msg->base.nm_so; 543 544 if (so->so_pcb) 545 so_pru_detach_direct(so); 546 547 sodiscard(so); 548 sofree(so); 549 } 550 551 static void 552 
soclose_detach_async(struct socket *so) 553 { 554 struct netmsg_base *base = &so->so_clomsg; 555 556 netmsg_init(base, so, &netisr_apanic_rport, 0, 557 soclose_detach_async_handler); 558 lwkt_sendmsg(so->so_port, &base->lmsg); 559 } 560 561 static void 562 soclose_fast(struct socket *so) 563 { 564 if (so->so_pcb == NULL) 565 goto discard; 566 567 if ((so->so_state & SS_ISCONNECTED) && 568 (so->so_state & SS_ISDISCONNECTING) == 0) { 569 soclose_disconn_async(so); 570 return; 571 } 572 573 if (so->so_pcb) { 574 soclose_detach_async(so); 575 return; 576 } 577 578 discard: 579 sodiscard(so); 580 soclose_sofree_async(so); 581 } 582 583 /* 584 * Abort and destroy a socket. Only one abort can be in progress 585 * at any given moment. 586 */ 587 void 588 soabort(struct socket *so) 589 { 590 soreference(so); 591 so_pru_abort(so); 592 } 593 594 void 595 soaborta(struct socket *so) 596 { 597 soreference(so); 598 so_pru_aborta(so); 599 } 600 601 void 602 soabort_oncpu(struct socket *so) 603 { 604 soreference(so); 605 so_pru_abort_oncpu(so); 606 } 607 608 /* 609 * so is passed in ref'd, which becomes owned by 610 * the cleared SS_NOFDREF flag. 611 */ 612 void 613 soaccept_generic(struct socket *so) 614 { 615 if ((so->so_state & SS_NOFDREF) == 0) 616 panic("soaccept: !NOFDREF"); 617 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 618 } 619 620 int 621 soaccept(struct socket *so, struct sockaddr **nam) 622 { 623 int error; 624 625 soaccept_generic(so); 626 error = so_pru_accept(so, nam); 627 return (error); 628 } 629 630 int 631 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) 632 { 633 int error; 634 635 if (so->so_options & SO_ACCEPTCONN) 636 return (EOPNOTSUPP); 637 /* 638 * If protocol is connection-based, can only connect once. 639 * Otherwise, if connected, try to disconnect first. 640 * This allows user to disconnect by connecting to, e.g., 641 * a null address. 642 */ 643 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 644 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 645 (error = sodisconnect(so)))) { 646 error = EISCONN; 647 } else { 648 /* 649 * Prevent accumulated error from previous connection 650 * from biting us. 651 */ 652 so->so_error = 0; 653 error = so_pru_connect(so, nam, td); 654 } 655 return (error); 656 } 657 658 int 659 soconnect2(struct socket *so1, struct socket *so2) 660 { 661 int error; 662 663 error = so_pru_connect2(so1, so2); 664 return (error); 665 } 666 667 int 668 sodisconnect(struct socket *so) 669 { 670 int error; 671 672 if ((so->so_state & SS_ISCONNECTED) == 0) { 673 error = ENOTCONN; 674 goto bad; 675 } 676 if (so->so_state & SS_ISDISCONNECTING) { 677 error = EALREADY; 678 goto bad; 679 } 680 error = so_pru_disconnect(so); 681 bad: 682 return (error); 683 } 684 685 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 686 /* 687 * Send on a socket. 688 * If send must go all at once and message is larger than 689 * send buffering, then hard error. 690 * Lock against other senders. 691 * If must go all at once and not enough room now, then 692 * inform user that this would block and do nothing. 693 * Otherwise, if nonblocking, send as much as possible. 694 * The data to be sent is described by "uio" if nonzero, 695 * otherwise by the mbuf chain "top" (which must be null 696 * if uio is not). Data provided in mbuf chain must be small 697 * enough to send all at once. 698 * 699 * Returns nonzero on error, timeout or signal; callers 700 * must check for short counts if EINTR/ERESTART are returned. 
701 * Data and control buffers are freed on return. 702 */ 703 int 704 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 705 struct mbuf *top, struct mbuf *control, int flags, 706 struct thread *td) 707 { 708 struct mbuf **mp; 709 struct mbuf *m; 710 size_t resid; 711 int space, len; 712 int clen = 0, error, dontroute, mlen; 713 int atomic = sosendallatonce(so) || top; 714 int pru_flags; 715 716 if (uio) { 717 resid = uio->uio_resid; 718 } else { 719 resid = (size_t)top->m_pkthdr.len; 720 #ifdef INVARIANTS 721 len = 0; 722 for (m = top; m; m = m->m_next) 723 len += m->m_len; 724 KKASSERT(top->m_pkthdr.len == len); 725 #endif 726 } 727 728 /* 729 * WARNING! resid is unsigned, space and len are signed. space 730 * can wind up negative if the sockbuf is overcommitted. 731 * 732 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 733 * type sockets since that's an error. 734 */ 735 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 736 error = EINVAL; 737 goto out; 738 } 739 740 dontroute = 741 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 742 (so->so_proto->pr_flags & PR_ATOMIC); 743 if (td->td_lwp != NULL) 744 td->td_lwp->lwp_ru.ru_msgsnd++; 745 if (control) 746 clen = control->m_len; 747 #define gotoerr(errcode) { error = errcode; goto release; } 748 749 restart: 750 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 751 if (error) 752 goto out; 753 754 do { 755 if (so->so_state & SS_CANTSENDMORE) 756 gotoerr(EPIPE); 757 if (so->so_error) { 758 error = so->so_error; 759 so->so_error = 0; 760 goto release; 761 } 762 if ((so->so_state & SS_ISCONNECTED) == 0) { 763 /* 764 * `sendto' and `sendmsg' is allowed on a connection- 765 * based socket if it supports implied connect. 766 * Return ENOTCONN if not connected and no address is 767 * supplied. 768 */ 769 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 770 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 771 if ((so->so_state & SS_ISCONFIRMING) == 0 && 772 !(resid == 0 && clen != 0)) 773 gotoerr(ENOTCONN); 774 } else if (addr == NULL) 775 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 776 ENOTCONN : EDESTADDRREQ); 777 } 778 if ((atomic && resid > so->so_snd.ssb_hiwat) || 779 clen > so->so_snd.ssb_hiwat) { 780 gotoerr(EMSGSIZE); 781 } 782 space = ssb_space(&so->so_snd); 783 if (flags & MSG_OOB) 784 space += 1024; 785 if ((space < 0 || (size_t)space < resid + clen) && uio && 786 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 787 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 788 gotoerr(EWOULDBLOCK); 789 ssb_unlock(&so->so_snd); 790 error = ssb_wait(&so->so_snd); 791 if (error) 792 goto out; 793 goto restart; 794 } 795 mp = ⊤ 796 space -= clen; 797 do { 798 if (uio == NULL) { 799 /* 800 * Data is prepackaged in "top". 801 */ 802 resid = 0; 803 if (flags & MSG_EOR) 804 top->m_flags |= M_EOR; 805 } else do { 806 if (resid > INT_MAX) 807 resid = INT_MAX; 808 m = m_getl((int)resid, MB_WAIT, MT_DATA, 809 top == NULL ? M_PKTHDR : 0, &mlen); 810 if (top == NULL) { 811 m->m_pkthdr.len = 0; 812 m->m_pkthdr.rcvif = NULL; 813 } 814 len = imin((int)szmin(mlen, resid), space); 815 if (resid < MINCLSIZE) { 816 /* 817 * For datagram protocols, leave room 818 * for protocol headers in first mbuf. 
819 */ 820 if (atomic && top == NULL && len < mlen) 821 MH_ALIGN(m, len); 822 } 823 space -= len; 824 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 825 resid = uio->uio_resid; 826 m->m_len = len; 827 *mp = m; 828 top->m_pkthdr.len += len; 829 if (error) 830 goto release; 831 mp = &m->m_next; 832 if (resid == 0) { 833 if (flags & MSG_EOR) 834 top->m_flags |= M_EOR; 835 break; 836 } 837 } while (space > 0 && atomic); 838 if (dontroute) 839 so->so_options |= SO_DONTROUTE; 840 if (flags & MSG_OOB) { 841 pru_flags = PRUS_OOB; 842 } else if ((flags & MSG_EOF) && 843 (so->so_proto->pr_flags & PR_IMPLOPCL) && 844 (resid == 0)) { 845 /* 846 * If the user set MSG_EOF, the protocol 847 * understands this flag and nothing left to 848 * send then use PRU_SEND_EOF instead of PRU_SEND. 849 */ 850 pru_flags = PRUS_EOF; 851 } else if (resid > 0 && space > 0) { 852 /* If there is more to send, set PRUS_MORETOCOME */ 853 pru_flags = PRUS_MORETOCOME; 854 } else { 855 pru_flags = 0; 856 } 857 /* 858 * XXX all the SS_CANTSENDMORE checks previously 859 * done could be out of date. We could have recieved 860 * a reset packet in an interrupt or maybe we slept 861 * while doing page faults in uiomove() etc. We could 862 * probably recheck again inside the splnet() protection 863 * here, but there are probably other places that this 864 * also happens. We must rethink this. 865 */ 866 error = so_pru_send(so, pru_flags, top, addr, control, td); 867 if (dontroute) 868 so->so_options &= ~SO_DONTROUTE; 869 clen = 0; 870 control = NULL; 871 top = NULL; 872 mp = ⊤ 873 if (error) 874 goto release; 875 } while (resid && space > 0); 876 } while (resid); 877 878 release: 879 ssb_unlock(&so->so_snd); 880 out: 881 if (top) 882 m_freem(top); 883 if (control) 884 m_freem(control); 885 return (error); 886 } 887 888 /* 889 * A specialization of sosend() for UDP based on protocol-specific knowledge: 890 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 891 * sosendallatonce() returns true, 892 * the "atomic" variable is true, 893 * and sosendudp() blocks until space is available for the entire send. 894 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 895 * PR_IMPLOPCL flags set. 896 * UDP has no out-of-band data. 897 * UDP has no control data. 898 * UDP does not support MSG_EOR. 899 */ 900 int 901 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 902 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 903 { 904 size_t resid; 905 int error, pru_flags = 0; 906 int space; 907 908 if (td->td_lwp != NULL) 909 td->td_lwp->lwp_ru.ru_msgsnd++; 910 if (control) 911 m_freem(control); 912 913 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 914 resid = uio ? 
uio->uio_resid : (size_t)top->m_pkthdr.len; 915 916 restart: 917 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 918 if (error) 919 goto out; 920 921 if (so->so_state & SS_CANTSENDMORE) 922 gotoerr(EPIPE); 923 if (so->so_error) { 924 error = so->so_error; 925 so->so_error = 0; 926 goto release; 927 } 928 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 929 gotoerr(EDESTADDRREQ); 930 if (resid > so->so_snd.ssb_hiwat) 931 gotoerr(EMSGSIZE); 932 space = ssb_space(&so->so_snd); 933 if (uio && (space < 0 || (size_t)space < resid)) { 934 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 935 gotoerr(EWOULDBLOCK); 936 ssb_unlock(&so->so_snd); 937 error = ssb_wait(&so->so_snd); 938 if (error) 939 goto out; 940 goto restart; 941 } 942 943 if (uio) { 944 top = m_uiomove(uio); 945 if (top == NULL) 946 goto release; 947 } 948 949 if (flags & MSG_DONTROUTE) 950 pru_flags |= PRUS_DONTROUTE; 951 952 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 953 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 954 error = 0; 955 } else { 956 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 957 } 958 top = NULL; /* sent or freed in lower layer */ 959 960 release: 961 ssb_unlock(&so->so_snd); 962 out: 963 if (top) 964 m_freem(top); 965 return (error); 966 } 967 968 int 969 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 970 struct mbuf *top, struct mbuf *control, int flags, 971 struct thread *td) 972 { 973 struct mbuf **mp; 974 struct mbuf *m; 975 size_t resid; 976 int space, len; 977 int error, mlen; 978 int allatonce; 979 int pru_flags; 980 981 if (uio) { 982 KKASSERT(top == NULL); 983 allatonce = 0; 984 resid = uio->uio_resid; 985 } else { 986 allatonce = 1; 987 resid = (size_t)top->m_pkthdr.len; 988 #ifdef INVARIANTS 989 len = 0; 990 for (m = top; m; m = m->m_next) 991 len += m->m_len; 992 KKASSERT(top->m_pkthdr.len == len); 993 #endif 994 } 995 996 /* 997 * WARNING! resid is unsigned, space and len are signed. space 998 * can wind up negative if the sockbuf is overcommitted. 999 * 1000 * Also check to make sure that MSG_EOR isn't used on TCP 1001 */ 1002 if (flags & MSG_EOR) { 1003 error = EINVAL; 1004 goto out; 1005 } 1006 1007 if (control) { 1008 /* TCP doesn't do control messages (rights, creds, etc) */ 1009 if (control->m_len) { 1010 error = EINVAL; 1011 goto out; 1012 } 1013 m_freem(control); /* empty control, just free it */ 1014 control = NULL; 1015 } 1016 1017 if (td->td_lwp != NULL) 1018 td->td_lwp->lwp_ru.ru_msgsnd++; 1019 1020 #define gotoerr(errcode) { error = errcode; goto release; } 1021 1022 restart: 1023 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1024 if (error) 1025 goto out; 1026 1027 do { 1028 if (so->so_state & SS_CANTSENDMORE) 1029 gotoerr(EPIPE); 1030 if (so->so_error) { 1031 error = so->so_error; 1032 so->so_error = 0; 1033 goto release; 1034 } 1035 if ((so->so_state & SS_ISCONNECTED) == 0 && 1036 (so->so_state & SS_ISCONFIRMING) == 0) 1037 gotoerr(ENOTCONN); 1038 if (allatonce && resid > so->so_snd.ssb_hiwat) 1039 gotoerr(EMSGSIZE); 1040 1041 space = ssb_space_prealloc(&so->so_snd); 1042 if (flags & MSG_OOB) 1043 space += 1024; 1044 if ((space < 0 || (size_t)space < resid) && !allatonce && 1045 space < so->so_snd.ssb_lowat) { 1046 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1047 gotoerr(EWOULDBLOCK); 1048 ssb_unlock(&so->so_snd); 1049 error = ssb_wait(&so->so_snd); 1050 if (error) 1051 goto out; 1052 goto restart; 1053 } 1054 mp = ⊤ 1055 do { 1056 int cnt = 0, async = 0; 1057 1058 if (uio == NULL) { 1059 /* 1060 * Data is prepackaged in "top". 
1061 */ 1062 resid = 0; 1063 } else do { 1064 if (resid > INT_MAX) 1065 resid = INT_MAX; 1066 m = m_getl((int)resid, MB_WAIT, MT_DATA, 1067 top == NULL ? M_PKTHDR : 0, &mlen); 1068 if (top == NULL) { 1069 m->m_pkthdr.len = 0; 1070 m->m_pkthdr.rcvif = NULL; 1071 } 1072 len = imin((int)szmin(mlen, resid), space); 1073 space -= len; 1074 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1075 resid = uio->uio_resid; 1076 m->m_len = len; 1077 *mp = m; 1078 top->m_pkthdr.len += len; 1079 if (error) 1080 goto release; 1081 mp = &m->m_next; 1082 if (resid == 0) 1083 break; 1084 ++cnt; 1085 } while (space > 0 && cnt < tcp_sosend_agglim); 1086 1087 if (tcp_sosend_async) 1088 async = 1; 1089 1090 if (flags & MSG_OOB) { 1091 pru_flags = PRUS_OOB; 1092 async = 0; 1093 } else if ((flags & MSG_EOF) && resid == 0) { 1094 pru_flags = PRUS_EOF; 1095 } else if (resid > 0 && space > 0) { 1096 /* If there is more to send, set PRUS_MORETOCOME */ 1097 pru_flags = PRUS_MORETOCOME; 1098 async = 1; 1099 } else { 1100 pru_flags = 0; 1101 } 1102 1103 if (flags & MSG_SYNC) 1104 async = 0; 1105 1106 /* 1107 * XXX all the SS_CANTSENDMORE checks previously 1108 * done could be out of date. We could have recieved 1109 * a reset packet in an interrupt or maybe we slept 1110 * while doing page faults in uiomove() etc. We could 1111 * probably recheck again inside the splnet() protection 1112 * here, but there are probably other places that this 1113 * also happens. We must rethink this. 1114 */ 1115 for (m = top; m; m = m->m_next) 1116 ssb_preallocstream(&so->so_snd, m); 1117 if (!async) { 1118 error = so_pru_send(so, pru_flags, top, 1119 NULL, NULL, td); 1120 } else { 1121 so_pru_send_async(so, pru_flags, top, 1122 NULL, NULL, td); 1123 error = 0; 1124 } 1125 1126 top = NULL; 1127 mp = ⊤ 1128 if (error) 1129 goto release; 1130 } while (resid && space > 0); 1131 } while (resid); 1132 1133 release: 1134 ssb_unlock(&so->so_snd); 1135 out: 1136 if (top) 1137 m_freem(top); 1138 if (control) 1139 m_freem(control); 1140 return (error); 1141 } 1142 1143 /* 1144 * Implement receive operations on a socket. 1145 * 1146 * We depend on the way that records are added to the signalsockbuf 1147 * by sbappend*. In particular, each record (mbufs linked through m_next) 1148 * must begin with an address if the protocol so specifies, 1149 * followed by an optional mbuf or mbufs containing ancillary data, 1150 * and then zero or more mbufs of data. 1151 * 1152 * Although the signalsockbuf is locked, new data may still be appended. 1153 * A token inside the ssb_lock deals with MP issues and still allows 1154 * the network to access the socket if we block in a uio. 1155 * 1156 * The caller may receive the data as a single mbuf chain by supplying 1157 * an mbuf **mp0 for use in returning the chain. The uio is then used 1158 * only for the count in uio_resid. 
1159 */ 1160 int 1161 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1162 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1163 { 1164 struct mbuf *m, *n; 1165 struct mbuf *free_chain = NULL; 1166 int flags, len, error, offset; 1167 struct protosw *pr = so->so_proto; 1168 int moff, type = 0; 1169 size_t resid, orig_resid; 1170 1171 if (uio) 1172 resid = uio->uio_resid; 1173 else 1174 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1175 orig_resid = resid; 1176 1177 if (psa) 1178 *psa = NULL; 1179 if (controlp) 1180 *controlp = NULL; 1181 if (flagsp) 1182 flags = *flagsp &~ MSG_EOR; 1183 else 1184 flags = 0; 1185 if (flags & MSG_OOB) { 1186 m = m_get(MB_WAIT, MT_DATA); 1187 if (m == NULL) 1188 return (ENOBUFS); 1189 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1190 if (error) 1191 goto bad; 1192 if (sio) { 1193 do { 1194 sbappend(sio, m); 1195 KKASSERT(resid >= (size_t)m->m_len); 1196 resid -= (size_t)m->m_len; 1197 } while (resid > 0 && m); 1198 } else { 1199 do { 1200 uio->uio_resid = resid; 1201 error = uiomove(mtod(m, caddr_t), 1202 (int)szmin(resid, m->m_len), 1203 uio); 1204 resid = uio->uio_resid; 1205 m = m_free(m); 1206 } while (uio->uio_resid && error == 0 && m); 1207 } 1208 bad: 1209 if (m) 1210 m_freem(m); 1211 return (error); 1212 } 1213 if ((so->so_state & SS_ISCONFIRMING) && resid) 1214 so_pru_rcvd(so, 0); 1215 1216 /* 1217 * The token interlocks against the protocol thread while 1218 * ssb_lock is a blocking lock against other userland entities. 1219 */ 1220 lwkt_gettoken(&so->so_rcv.ssb_token); 1221 restart: 1222 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1223 if (error) 1224 goto done; 1225 1226 m = so->so_rcv.ssb_mb; 1227 /* 1228 * If we have less data than requested, block awaiting more 1229 * (subject to any timeout) if: 1230 * 1. the current count is less than the low water mark, or 1231 * 2. MSG_WAITALL is set, and it is possible to do the entire 1232 * receive operation at once if we block (resid <= hiwat). 1233 * 3. MSG_DONTWAIT is not set 1234 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1235 * we have to do the receive in sections, and thus risk returning 1236 * a short count if a timeout or signal occurs after we start. 
1237 */ 1238 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1239 (size_t)so->so_rcv.ssb_cc < resid) && 1240 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1241 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1242 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1243 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1244 if (so->so_error) { 1245 if (m) 1246 goto dontblock; 1247 error = so->so_error; 1248 if ((flags & MSG_PEEK) == 0) 1249 so->so_error = 0; 1250 goto release; 1251 } 1252 if (so->so_state & SS_CANTRCVMORE) { 1253 if (m) 1254 goto dontblock; 1255 else 1256 goto release; 1257 } 1258 for (; m; m = m->m_next) { 1259 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1260 m = so->so_rcv.ssb_mb; 1261 goto dontblock; 1262 } 1263 } 1264 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1265 (pr->pr_flags & PR_CONNREQUIRED)) { 1266 error = ENOTCONN; 1267 goto release; 1268 } 1269 if (resid == 0) 1270 goto release; 1271 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1272 error = EWOULDBLOCK; 1273 goto release; 1274 } 1275 ssb_unlock(&so->so_rcv); 1276 error = ssb_wait(&so->so_rcv); 1277 if (error) 1278 goto done; 1279 goto restart; 1280 } 1281 dontblock: 1282 if (uio && uio->uio_td && uio->uio_td->td_proc) 1283 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1284 1285 /* 1286 * note: m should be == sb_mb here. Cache the next record while 1287 * cleaning up. Note that calling m_free*() will break out critical 1288 * section. 1289 */ 1290 KKASSERT(m == so->so_rcv.ssb_mb); 1291 1292 /* 1293 * Skip any address mbufs prepending the record. 1294 */ 1295 if (pr->pr_flags & PR_ADDR) { 1296 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1297 orig_resid = 0; 1298 if (psa) 1299 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1300 if (flags & MSG_PEEK) 1301 m = m->m_next; 1302 else 1303 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1304 } 1305 1306 /* 1307 * Skip any control mbufs prepending the record. 1308 */ 1309 #ifdef SCTP 1310 if (pr->pr_flags & PR_ADDR_OPT) { 1311 /* 1312 * For SCTP we may be getting a 1313 * whole message OR a partial delivery. 1314 */ 1315 if (m && m->m_type == MT_SONAME) { 1316 orig_resid = 0; 1317 if (psa) 1318 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1319 if (flags & MSG_PEEK) 1320 m = m->m_next; 1321 else 1322 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1323 } 1324 } 1325 #endif /* SCTP */ 1326 while (m && m->m_type == MT_CONTROL && error == 0) { 1327 if (flags & MSG_PEEK) { 1328 if (controlp) 1329 *controlp = m_copy(m, 0, m->m_len); 1330 m = m->m_next; /* XXX race */ 1331 } else { 1332 if (controlp) { 1333 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1334 if (pr->pr_domain->dom_externalize && 1335 mtod(m, struct cmsghdr *)->cmsg_type == 1336 SCM_RIGHTS) 1337 error = (*pr->pr_domain->dom_externalize)(m); 1338 *controlp = m; 1339 m = n; 1340 } else { 1341 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1342 } 1343 } 1344 if (controlp && *controlp) { 1345 orig_resid = 0; 1346 controlp = &(*controlp)->m_next; 1347 } 1348 } 1349 1350 /* 1351 * flag OOB data. 1352 */ 1353 if (m) { 1354 type = m->m_type; 1355 if (type == MT_OOBDATA) 1356 flags |= MSG_OOB; 1357 } 1358 1359 /* 1360 * Copy to the UIO or mbuf return chain (*mp). 
1361 */ 1362 moff = 0; 1363 offset = 0; 1364 while (m && resid > 0 && error == 0) { 1365 if (m->m_type == MT_OOBDATA) { 1366 if (type != MT_OOBDATA) 1367 break; 1368 } else if (type == MT_OOBDATA) 1369 break; 1370 else 1371 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1372 ("receive 3")); 1373 soclrstate(so, SS_RCVATMARK); 1374 len = (resid > INT_MAX) ? INT_MAX : resid; 1375 if (so->so_oobmark && len > so->so_oobmark - offset) 1376 len = so->so_oobmark - offset; 1377 if (len > m->m_len - moff) 1378 len = m->m_len - moff; 1379 1380 /* 1381 * Copy out to the UIO or pass the mbufs back to the SIO. 1382 * The SIO is dealt with when we eat the mbuf, but deal 1383 * with the resid here either way. 1384 */ 1385 if (uio) { 1386 uio->uio_resid = resid; 1387 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1388 resid = uio->uio_resid; 1389 if (error) 1390 goto release; 1391 } else { 1392 resid -= (size_t)len; 1393 } 1394 1395 /* 1396 * Eat the entire mbuf or just a piece of it 1397 */ 1398 if (len == m->m_len - moff) { 1399 if (m->m_flags & M_EOR) 1400 flags |= MSG_EOR; 1401 #ifdef SCTP 1402 if (m->m_flags & M_NOTIFICATION) 1403 flags |= MSG_NOTIFICATION; 1404 #endif /* SCTP */ 1405 if (flags & MSG_PEEK) { 1406 m = m->m_next; 1407 moff = 0; 1408 } else { 1409 if (sio) { 1410 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1411 sbappend(sio, m); 1412 m = n; 1413 } else { 1414 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1415 } 1416 } 1417 } else { 1418 if (flags & MSG_PEEK) { 1419 moff += len; 1420 } else { 1421 if (sio) { 1422 n = m_copym(m, 0, len, MB_WAIT); 1423 if (n) 1424 sbappend(sio, n); 1425 } 1426 m->m_data += len; 1427 m->m_len -= len; 1428 so->so_rcv.ssb_cc -= len; 1429 } 1430 } 1431 if (so->so_oobmark) { 1432 if ((flags & MSG_PEEK) == 0) { 1433 so->so_oobmark -= len; 1434 if (so->so_oobmark == 0) { 1435 sosetstate(so, SS_RCVATMARK); 1436 break; 1437 } 1438 } else { 1439 offset += len; 1440 if (offset == so->so_oobmark) 1441 break; 1442 } 1443 } 1444 if (flags & MSG_EOR) 1445 break; 1446 /* 1447 * If the MSG_WAITALL flag is set (for non-atomic socket), 1448 * we must not quit until resid == 0 or an error 1449 * termination. If a signal/timeout occurs, return 1450 * with a short count but without error. 1451 * Keep signalsockbuf locked against other readers. 1452 */ 1453 while ((flags & MSG_WAITALL) && m == NULL && 1454 resid > 0 && !sosendallatonce(so) && 1455 so->so_rcv.ssb_mb == NULL) { 1456 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1457 break; 1458 /* 1459 * The window might have closed to zero, make 1460 * sure we send an ack now that we've drained 1461 * the buffer or we might end up blocking until 1462 * the idle takes over (5 seconds). 1463 */ 1464 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1465 so_pru_rcvd(so, flags); 1466 error = ssb_wait(&so->so_rcv); 1467 if (error) { 1468 ssb_unlock(&so->so_rcv); 1469 error = 0; 1470 goto done; 1471 } 1472 m = so->so_rcv.ssb_mb; 1473 } 1474 } 1475 1476 /* 1477 * If an atomic read was requested but unread data still remains 1478 * in the record, set MSG_TRUNC. 1479 */ 1480 if (m && pr->pr_flags & PR_ATOMIC) 1481 flags |= MSG_TRUNC; 1482 1483 /* 1484 * Cleanup. If an atomic read was requested drop any unread data. 
1485 */ 1486 if ((flags & MSG_PEEK) == 0) { 1487 if (m && (pr->pr_flags & PR_ATOMIC)) 1488 sbdroprecord(&so->so_rcv.sb); 1489 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1490 so_pru_rcvd(so, flags); 1491 } 1492 1493 if (orig_resid == resid && orig_resid && 1494 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1495 ssb_unlock(&so->so_rcv); 1496 goto restart; 1497 } 1498 1499 if (flagsp) 1500 *flagsp |= flags; 1501 release: 1502 ssb_unlock(&so->so_rcv); 1503 done: 1504 lwkt_reltoken(&so->so_rcv.ssb_token); 1505 if (free_chain) 1506 m_freem(free_chain); 1507 return (error); 1508 } 1509 1510 int 1511 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio, 1512 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1513 { 1514 struct mbuf *m, *n; 1515 struct mbuf *free_chain = NULL; 1516 int flags, len, error, offset; 1517 struct protosw *pr = so->so_proto; 1518 int moff; 1519 size_t resid, orig_resid; 1520 1521 if (uio) 1522 resid = uio->uio_resid; 1523 else 1524 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1525 orig_resid = resid; 1526 1527 if (psa) 1528 *psa = NULL; 1529 if (controlp) 1530 *controlp = NULL; 1531 if (flagsp) 1532 flags = *flagsp &~ MSG_EOR; 1533 else 1534 flags = 0; 1535 if (flags & MSG_OOB) { 1536 m = m_get(MB_WAIT, MT_DATA); 1537 if (m == NULL) 1538 return (ENOBUFS); 1539 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1540 if (error) 1541 goto bad; 1542 if (sio) { 1543 do { 1544 sbappend(sio, m); 1545 KKASSERT(resid >= (size_t)m->m_len); 1546 resid -= (size_t)m->m_len; 1547 } while (resid > 0 && m); 1548 } else { 1549 do { 1550 uio->uio_resid = resid; 1551 error = uiomove(mtod(m, caddr_t), 1552 (int)szmin(resid, m->m_len), 1553 uio); 1554 resid = uio->uio_resid; 1555 m = m_free(m); 1556 } while (uio->uio_resid && error == 0 && m); 1557 } 1558 bad: 1559 if (m) 1560 m_freem(m); 1561 return (error); 1562 } 1563 1564 /* 1565 * The token interlocks against the protocol thread while 1566 * ssb_lock is a blocking lock against other userland entities. 1567 */ 1568 lwkt_gettoken(&so->so_rcv.ssb_token); 1569 restart: 1570 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1571 if (error) 1572 goto done; 1573 1574 m = so->so_rcv.ssb_mb; 1575 /* 1576 * If we have less data than requested, block awaiting more 1577 * (subject to any timeout) if: 1578 * 1. the current count is less than the low water mark, or 1579 * 2. MSG_WAITALL is set, and it is possible to do the entire 1580 * receive operation at once if we block (resid <= hiwat). 1581 * 3. MSG_DONTWAIT is not set 1582 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1583 * we have to do the receive in sections, and thus risk returning 1584 * a short count if a timeout or signal occurs after we start. 
1585 */ 1586 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1587 (size_t)so->so_rcv.ssb_cc < resid) && 1588 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1589 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1590 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1591 if (so->so_error) { 1592 if (m) 1593 goto dontblock; 1594 error = so->so_error; 1595 if ((flags & MSG_PEEK) == 0) 1596 so->so_error = 0; 1597 goto release; 1598 } 1599 if (so->so_state & SS_CANTRCVMORE) { 1600 if (m) 1601 goto dontblock; 1602 else 1603 goto release; 1604 } 1605 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1606 (pr->pr_flags & PR_CONNREQUIRED)) { 1607 error = ENOTCONN; 1608 goto release; 1609 } 1610 if (resid == 0) 1611 goto release; 1612 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1613 error = EWOULDBLOCK; 1614 goto release; 1615 } 1616 ssb_unlock(&so->so_rcv); 1617 error = ssb_wait(&so->so_rcv); 1618 if (error) 1619 goto done; 1620 goto restart; 1621 } 1622 dontblock: 1623 if (uio && uio->uio_td && uio->uio_td->td_proc) 1624 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1625 1626 /* 1627 * note: m should be == sb_mb here. Cache the next record while 1628 * cleaning up. Note that calling m_free*() will break out critical 1629 * section. 1630 */ 1631 KKASSERT(m == so->so_rcv.ssb_mb); 1632 1633 /* 1634 * Copy to the UIO or mbuf return chain (*mp). 1635 */ 1636 moff = 0; 1637 offset = 0; 1638 while (m && resid > 0 && error == 0) { 1639 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1640 ("receive 3")); 1641 1642 soclrstate(so, SS_RCVATMARK); 1643 len = (resid > INT_MAX) ? INT_MAX : resid; 1644 if (so->so_oobmark && len > so->so_oobmark - offset) 1645 len = so->so_oobmark - offset; 1646 if (len > m->m_len - moff) 1647 len = m->m_len - moff; 1648 1649 /* 1650 * Copy out to the UIO or pass the mbufs back to the SIO. 1651 * The SIO is dealt with when we eat the mbuf, but deal 1652 * with the resid here either way. 1653 */ 1654 if (uio) { 1655 uio->uio_resid = resid; 1656 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1657 resid = uio->uio_resid; 1658 if (error) 1659 goto release; 1660 } else { 1661 resid -= (size_t)len; 1662 } 1663 1664 /* 1665 * Eat the entire mbuf or just a piece of it 1666 */ 1667 if (len == m->m_len - moff) { 1668 if (flags & MSG_PEEK) { 1669 m = m->m_next; 1670 moff = 0; 1671 } else { 1672 if (sio) { 1673 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1674 sbappend(sio, m); 1675 m = n; 1676 } else { 1677 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1678 } 1679 } 1680 } else { 1681 if (flags & MSG_PEEK) { 1682 moff += len; 1683 } else { 1684 if (sio) { 1685 n = m_copym(m, 0, len, MB_WAIT); 1686 if (n) 1687 sbappend(sio, n); 1688 } 1689 m->m_data += len; 1690 m->m_len -= len; 1691 so->so_rcv.ssb_cc -= len; 1692 } 1693 } 1694 if (so->so_oobmark) { 1695 if ((flags & MSG_PEEK) == 0) { 1696 so->so_oobmark -= len; 1697 if (so->so_oobmark == 0) { 1698 sosetstate(so, SS_RCVATMARK); 1699 break; 1700 } 1701 } else { 1702 offset += len; 1703 if (offset == so->so_oobmark) 1704 break; 1705 } 1706 } 1707 /* 1708 * If the MSG_WAITALL flag is set (for non-atomic socket), 1709 * we must not quit until resid == 0 or an error 1710 * termination. If a signal/timeout occurs, return 1711 * with a short count but without error. 1712 * Keep signalsockbuf locked against other readers. 
1713 */ 1714 while ((flags & MSG_WAITALL) && m == NULL && 1715 resid > 0 && !sosendallatonce(so) && 1716 so->so_rcv.ssb_mb == NULL) { 1717 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1718 break; 1719 /* 1720 * The window might have closed to zero, make 1721 * sure we send an ack now that we've drained 1722 * the buffer or we might end up blocking until 1723 * the idle takes over (5 seconds). 1724 */ 1725 if (so->so_pcb) 1726 so_pru_rcvd_async(so); 1727 error = ssb_wait(&so->so_rcv); 1728 if (error) { 1729 ssb_unlock(&so->so_rcv); 1730 error = 0; 1731 goto done; 1732 } 1733 m = so->so_rcv.ssb_mb; 1734 } 1735 } 1736 1737 /* 1738 * Cleanup. If an atomic read was requested drop any unread data. 1739 */ 1740 if ((flags & MSG_PEEK) == 0) { 1741 if (so->so_pcb) 1742 so_pru_rcvd_async(so); 1743 } 1744 1745 if (orig_resid == resid && orig_resid && 1746 (so->so_state & SS_CANTRCVMORE) == 0) { 1747 ssb_unlock(&so->so_rcv); 1748 goto restart; 1749 } 1750 1751 if (flagsp) 1752 *flagsp |= flags; 1753 release: 1754 ssb_unlock(&so->so_rcv); 1755 done: 1756 lwkt_reltoken(&so->so_rcv.ssb_token); 1757 if (free_chain) 1758 m_freem(free_chain); 1759 return (error); 1760 } 1761 1762 /* 1763 * Shut a socket down. Note that we do not get a frontend lock as we 1764 * want to be able to shut the socket down even if another thread is 1765 * blocked in a read(), thus waking it up. 1766 */ 1767 int 1768 soshutdown(struct socket *so, int how) 1769 { 1770 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1771 return (EINVAL); 1772 1773 if (how != SHUT_WR) { 1774 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1775 sorflush(so); 1776 /*ssb_unlock(&so->so_rcv);*/ 1777 } 1778 if (how != SHUT_RD) 1779 return (so_pru_shutdown(so)); 1780 return (0); 1781 } 1782 1783 void 1784 sorflush(struct socket *so) 1785 { 1786 struct signalsockbuf *ssb = &so->so_rcv; 1787 struct protosw *pr = so->so_proto; 1788 struct signalsockbuf asb; 1789 1790 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1791 1792 lwkt_gettoken(&ssb->ssb_token); 1793 socantrcvmore(so); 1794 asb = *ssb; 1795 1796 /* 1797 * Can't just blow up the ssb structure here 1798 */ 1799 bzero(&ssb->sb, sizeof(ssb->sb)); 1800 ssb->ssb_timeo = 0; 1801 ssb->ssb_lowat = 0; 1802 ssb->ssb_hiwat = 0; 1803 ssb->ssb_mbmax = 0; 1804 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1805 1806 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1807 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1808 ssb_release(&asb, so); 1809 1810 lwkt_reltoken(&ssb->ssb_token); 1811 } 1812 1813 #ifdef INET 1814 static int 1815 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1816 { 1817 struct accept_filter_arg *afap = NULL; 1818 struct accept_filter *afp; 1819 struct so_accf *af = so->so_accf; 1820 int error = 0; 1821 1822 /* do not set/remove accept filters on non listen sockets */ 1823 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1824 error = EINVAL; 1825 goto out; 1826 } 1827 1828 /* removing the filter */ 1829 if (sopt == NULL) { 1830 if (af != NULL) { 1831 if (af->so_accept_filter != NULL && 1832 af->so_accept_filter->accf_destroy != NULL) { 1833 af->so_accept_filter->accf_destroy(so); 1834 } 1835 if (af->so_accept_filter_str != NULL) { 1836 kfree(af->so_accept_filter_str, M_ACCF); 1837 } 1838 kfree(af, M_ACCF); 1839 so->so_accf = NULL; 1840 } 1841 so->so_options &= ~SO_ACCEPTFILTER; 1842 return (0); 1843 } 1844 /* adding a filter */ 1845 /* must remove previous filter first */ 1846 if (af != NULL) { 1847 error = EINVAL; 1848 goto out; 1849 } 1850 /* don't put large 
objects on the kernel stack */ 1851 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 1852 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 1853 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 1854 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 1855 if (error) 1856 goto out; 1857 afp = accept_filt_get(afap->af_name); 1858 if (afp == NULL) { 1859 error = ENOENT; 1860 goto out; 1861 } 1862 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 1863 if (afp->accf_create != NULL) { 1864 if (afap->af_name[0] != '\0') { 1865 int len = strlen(afap->af_name) + 1; 1866 1867 af->so_accept_filter_str = kmalloc(len, M_ACCF, 1868 M_WAITOK); 1869 strcpy(af->so_accept_filter_str, afap->af_name); 1870 } 1871 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 1872 if (af->so_accept_filter_arg == NULL) { 1873 kfree(af->so_accept_filter_str, M_ACCF); 1874 kfree(af, M_ACCF); 1875 so->so_accf = NULL; 1876 error = EINVAL; 1877 goto out; 1878 } 1879 } 1880 af->so_accept_filter = afp; 1881 so->so_accf = af; 1882 so->so_options |= SO_ACCEPTFILTER; 1883 out: 1884 if (afap != NULL) 1885 kfree(afap, M_TEMP); 1886 return (error); 1887 } 1888 #endif /* INET */ 1889 1890 /* 1891 * Perhaps this routine, and sooptcopyout(), below, ought to come in 1892 * an additional variant to handle the case where the option value needs 1893 * to be some kind of integer, but not a specific size. 1894 * In addition to their use here, these functions are also called by the 1895 * protocol-level pr_ctloutput() routines. 1896 */ 1897 int 1898 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 1899 { 1900 return soopt_to_kbuf(sopt, buf, len, minlen); 1901 } 1902 1903 int 1904 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 1905 { 1906 size_t valsize; 1907 1908 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 1909 KKASSERT(kva_p(buf)); 1910 1911 /* 1912 * If the user gives us more than we wanted, we ignore it, 1913 * but if we don't get the minimum length the caller 1914 * wants, we return EINVAL. On success, sopt->sopt_valsize 1915 * is set to however much we actually retrieved. 
1916 */ 1917 if ((valsize = sopt->sopt_valsize) < minlen) 1918 return EINVAL; 1919 if (valsize > len) 1920 sopt->sopt_valsize = valsize = len; 1921 1922 bcopy(sopt->sopt_val, buf, valsize); 1923 return 0; 1924 } 1925 1926 1927 int 1928 sosetopt(struct socket *so, struct sockopt *sopt) 1929 { 1930 int error, optval; 1931 struct linger l; 1932 struct timeval tv; 1933 u_long val; 1934 struct signalsockbuf *sotmp; 1935 1936 error = 0; 1937 sopt->sopt_dir = SOPT_SET; 1938 if (sopt->sopt_level != SOL_SOCKET) { 1939 if (so->so_proto && so->so_proto->pr_ctloutput) { 1940 return (so_pr_ctloutput(so, sopt)); 1941 } 1942 error = ENOPROTOOPT; 1943 } else { 1944 switch (sopt->sopt_name) { 1945 #ifdef INET 1946 case SO_ACCEPTFILTER: 1947 error = do_setopt_accept_filter(so, sopt); 1948 if (error) 1949 goto bad; 1950 break; 1951 #endif /* INET */ 1952 case SO_LINGER: 1953 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 1954 if (error) 1955 goto bad; 1956 1957 so->so_linger = l.l_linger; 1958 if (l.l_onoff) 1959 so->so_options |= SO_LINGER; 1960 else 1961 so->so_options &= ~SO_LINGER; 1962 break; 1963 1964 case SO_DEBUG: 1965 case SO_KEEPALIVE: 1966 case SO_DONTROUTE: 1967 case SO_USELOOPBACK: 1968 case SO_BROADCAST: 1969 case SO_REUSEADDR: 1970 case SO_REUSEPORT: 1971 case SO_OOBINLINE: 1972 case SO_TIMESTAMP: 1973 case SO_NOSIGPIPE: 1974 error = sooptcopyin(sopt, &optval, sizeof optval, 1975 sizeof optval); 1976 if (error) 1977 goto bad; 1978 if (optval) 1979 so->so_options |= sopt->sopt_name; 1980 else 1981 so->so_options &= ~sopt->sopt_name; 1982 break; 1983 1984 case SO_SNDBUF: 1985 case SO_RCVBUF: 1986 case SO_SNDLOWAT: 1987 case SO_RCVLOWAT: 1988 error = sooptcopyin(sopt, &optval, sizeof optval, 1989 sizeof optval); 1990 if (error) 1991 goto bad; 1992 1993 /* 1994 * Values < 1 make no sense for any of these 1995 * options, so disallow them. 1996 */ 1997 if (optval < 1) { 1998 error = EINVAL; 1999 goto bad; 2000 } 2001 2002 switch (sopt->sopt_name) { 2003 case SO_SNDBUF: 2004 case SO_RCVBUF: 2005 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ? 2006 &so->so_snd : &so->so_rcv, (u_long)optval, 2007 so, 2008 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) { 2009 error = ENOBUFS; 2010 goto bad; 2011 } 2012 sotmp = (sopt->sopt_name == SO_SNDBUF) ? 2013 &so->so_snd : &so->so_rcv; 2014 atomic_clear_int(&sotmp->ssb_flags, 2015 SSB_AUTOSIZE); 2016 break; 2017 2018 /* 2019 * Make sure the low-water is never greater than 2020 * the high-water. 2021 */ 2022 case SO_SNDLOWAT: 2023 so->so_snd.ssb_lowat = 2024 (optval > so->so_snd.ssb_hiwat) ? 2025 so->so_snd.ssb_hiwat : optval; 2026 atomic_clear_int(&so->so_snd.ssb_flags, 2027 SSB_AUTOLOWAT); 2028 break; 2029 case SO_RCVLOWAT: 2030 so->so_rcv.ssb_lowat = 2031 (optval > so->so_rcv.ssb_hiwat) ? 
2032 so->so_rcv.ssb_hiwat : optval; 2033 atomic_clear_int(&so->so_rcv.ssb_flags, 2034 SSB_AUTOLOWAT); 2035 break; 2036 } 2037 break; 2038 2039 case SO_SNDTIMEO: 2040 case SO_RCVTIMEO: 2041 error = sooptcopyin(sopt, &tv, sizeof tv, 2042 sizeof tv); 2043 if (error) 2044 goto bad; 2045 2046 /* assert(hz > 0); */ 2047 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2048 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2049 error = EDOM; 2050 goto bad; 2051 } 2052 /* assert(tick > 0); */ 2053 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2054 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick; 2055 if (val > INT_MAX) { 2056 error = EDOM; 2057 goto bad; 2058 } 2059 if (val == 0 && tv.tv_usec != 0) 2060 val = 1; 2061 2062 switch (sopt->sopt_name) { 2063 case SO_SNDTIMEO: 2064 so->so_snd.ssb_timeo = val; 2065 break; 2066 case SO_RCVTIMEO: 2067 so->so_rcv.ssb_timeo = val; 2068 break; 2069 } 2070 break; 2071 default: 2072 error = ENOPROTOOPT; 2073 break; 2074 } 2075 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { 2076 (void) so_pr_ctloutput(so, sopt); 2077 } 2078 } 2079 bad: 2080 return (error); 2081 } 2082 2083 /* Helper routine for getsockopt */ 2084 int 2085 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2086 { 2087 soopt_from_kbuf(sopt, buf, len); 2088 return 0; 2089 } 2090 2091 void 2092 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len) 2093 { 2094 size_t valsize; 2095 2096 if (len == 0) { 2097 sopt->sopt_valsize = 0; 2098 return; 2099 } 2100 2101 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2102 KKASSERT(kva_p(buf)); 2103 2104 /* 2105 * Documented get behavior is that we always return a value, 2106 * possibly truncated to fit in the user's buffer. 2107 * Traditional behavior is that we always tell the user 2108 * precisely how much we copied, rather than something useful 2109 * like the total amount we had available for her. 2110 * Note that this interface is not idempotent; the entire answer must 2111 * generated ahead of time. 

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
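
/*
 * Illustration only: soopt_getm() above and soopt_mcopyin()/soopt_mcopyout()
 * below bridge sockopt requests to legacy handlers that expect option data
 * in an mbuf chain.  A caller would typically use them in roughly this
 * order (a sketch assuming some mbuf-based handler foo_ctloutput_mbuf()
 * exists; error handling abbreviated):
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);		// size and allocate the chain
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	// sockopt buffer -> mbufs
 *	if (error == 0)
 *		error = foo_ctloutput_mbuf(so, sopt, &m);  // hypothetical
 *	if (error == 0 && sopt->sopt_dir == SOPT_GET)
 *		error = soopt_mcopyout(sopt, m);	// mbufs -> sockopt buffer
 */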

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should have been given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
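
/*
 * Illustration only: filt_soread() above is what ultimately decides whether
 * an EVFILT_READ kevent on a socket fires.  From userland the low-water
 * test against kn_sdata is driven by NOTE_LOWAT, e.g. (sketch, error
 * handling elided):
 *
 *	struct kevent kev;
 *
 *	// fire only once at least 128 bytes are buffered on socket s
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */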

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}
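
/*
 * Illustration only: for a listening socket sokqfilter() selects
 * filt_solisten(), so an EVFILT_READ kevent fires once the completed
 * connection queue is non-empty and kn_data reports so_qlen.  A minimal
 * userland sketch (assumes ls is a socket already marked listening and kq
 * is an existing kqueue; error handling elided):
 *
 *	struct kevent kev, ev;
 *
 *	EV_SET(&kev, ls, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		printf("%ld connection(s) ready to accept\n", (long)ev.data);
 */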