/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static void	sodiscard(struct socket *so);
static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");


static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
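
/*
 * Illustrative sketch (not part of the original file): the typical
 * in-kernel lifecycle built from the routines below.  Error handling is
 * elided and the calling thread/credential context is assumed; this is
 * a rough usage outline, not a definitive recipe.
 */
#if 0
static void
socket_lifecycle_sketch(struct sockaddr *laddr, struct thread *td)
{
        struct socket *so;

        if (socreate(AF_INET, &so, SOCK_STREAM, 0, td) != 0)
                return;                         /* allocate + pru_attach */
        if (sobind(so, laddr, td) == 0 &&       /* pick a local address */
            solisten(so, SOMAXCONN, td) == 0) { /* enter listen state */
                /* the accept(2) path calls soaccept() per incoming socket */
        }
        soclose(so, 0);                         /* drop the file reference */
}
#endif
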
/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
        struct socket *so;
        unsigned waitmask;

        waitmask = waitok ? M_WAITOK : M_NOWAIT;
        so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
        if (so) {
                /* XXX race condition for reentrant kernel */
                so->so_proto = pr;
                TAILQ_INIT(&so->so_aiojobq);
                TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
                TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
                lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
                lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
                spin_init(&so->so_rcvd_spin);
                netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
                    MSGF_DROPABLE, so->so_proto->pr_usrreqs->pru_rcvd);
                so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
                so->so_state = SS_NOFDREF;
                so->so_refs = 1;
        }
        return so;
}

int
socreate(int dom, struct socket **aso, int type,
    int proto, struct thread *td)
{
        struct proc *p = td->td_proc;
        struct protosw *prp;
        struct socket *so;
        struct pru_attach_info ai;
        int error;

        if (proto)
                prp = pffindproto(dom, proto, type);
        else
                prp = pffindtype(dom, type);

        if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
                return (EPROTONOSUPPORT);

        if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
            prp->pr_domain->dom_family != PF_LOCAL &&
            prp->pr_domain->dom_family != PF_INET &&
            prp->pr_domain->dom_family != PF_INET6 &&
            prp->pr_domain->dom_family != PF_ROUTE) {
                return (EPROTONOSUPPORT);
        }

        if (prp->pr_type != type)
                return (EPROTOTYPE);
        so = soalloc(p != NULL, prp);
        if (so == NULL)
                return (ENOBUFS);

        /*
         * Callers of socreate() presumably will connect up a descriptor
         * and call soclose() if they cannot.  This represents our so_refs
         * (which should be 1) from soalloc().
         */
        soclrstate(so, SS_NOFDREF);

        /*
         * Set a default port for protocol processing.  No action will occur
         * on the socket on this port until an inpcb is attached to it and
         * is able to match incoming packets, or until the socket becomes
         * available to userland.
         *
         * We normally default the socket to the protocol thread on cpu 0.
         * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
         * thread and all pr_*()/pru_*() calls are executed synchronously.
         */
        if (prp->pr_flags & PR_SYNC_PORT)
                so->so_port = &netisr_sync_port;
        else
                so->so_port = netisr_cpuport(0);

        TAILQ_INIT(&so->so_incomp);
        TAILQ_INIT(&so->so_comp);
        so->so_type = type;
        so->so_cred = crhold(p->p_ucred);
        ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
        ai.p_ucred = p->p_ucred;
        ai.fd_rdir = p->p_fd->fd_rdir;

        /*
         * Auto-sizing of socket buffers is managed by the protocols and
         * the appropriate flags must be set in the pru_attach function.
         */
        error = so_pru_attach(so, proto, &ai);
        if (error) {
                sosetstate(so, SS_NOFDREF);
                sofree(so);     /* from soalloc */
                return error;
        }

        /*
         * NOTE: Returns referenced socket.
         */
        *aso = so;
        return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        int error;

        error = so_pru_bind(so, nam, td);
        return (error);
}

static void
sodealloc(struct socket *so)
{
        if (so->so_rcv.ssb_hiwat)
                (void)chgsbsize(so->so_cred->cr_uidinfo,
                    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
        if (so->so_snd.ssb_hiwat)
                (void)chgsbsize(so->so_cred->cr_uidinfo,
                    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
        /* remove accept filter if present */
        if (so->so_accf != NULL)
                do_setopt_accept_filter(so, NULL);
#endif /* INET */
        crfree(so->so_cred);
        if (so->so_faddr != NULL)
                kfree(so->so_faddr, M_SONAME);
        kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
        int error;
#ifdef SCTP
        short oldopt, oldqlimit;
#endif /* SCTP */

        if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
                return (EINVAL);

#ifdef SCTP
        oldopt = so->so_options;
        oldqlimit = so->so_qlimit;
#endif /* SCTP */

        lwkt_gettoken(&so->so_rcv.ssb_token);
        if (TAILQ_EMPTY(&so->so_comp))
                so->so_options |= SO_ACCEPTCONN;
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (backlog < 0 || backlog > somaxconn)
                backlog = somaxconn;
        so->so_qlimit = backlog;
        /*
         * SCTP needs to tweak both the inbound backlog parameter AND
         * the so_options (in the UDP model a socket both connects and
         * implicitly accepts inbound connections).
         */
        error = so_pru_listen(so, td);
        if (error) {
#ifdef SCTP
                /* Restore the params */
                so->so_options = oldopt;
                so->so_qlimit = oldqlimit;
#endif /* SCTP */
                return (error);
        }
        return (0);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
        struct socket *head;

        /*
         * This is a bit hackish at the moment.  We need to interlock
         * any accept queue we are on before we potentially lose the
         * last reference to avoid races against a re-reference from
         * someone operating on the queue.
         */
        while ((head = so->so_head) != NULL) {
                lwkt_getpooltoken(head);
                if (so->so_head == head)
                        break;
                lwkt_relpooltoken(head);
        }

        /*
         * Arbitrage the last free.
         */
        KKASSERT(so->so_refs > 0);
        if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
                if (head)
                        lwkt_relpooltoken(head);
                return;
        }

        KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
        KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

        /*
         * We're done, remove ourselves from the accept queue we are
         * on, if we are on one.
         */
        if (head != NULL) {
                if (so->so_state & SS_INCOMP) {
                        TAILQ_REMOVE(&head->so_incomp, so, so_list);
                        head->so_incqlen--;
                } else if (so->so_state & SS_COMP) {
                        /*
                         * We must not decommission a socket that's
                         * on the accept(2) queue.  If we do, then
                         * accept(2) may hang after select(2) indicated
                         * that the listening socket was ready.
                         */
                        lwkt_relpooltoken(head);
                        return;
                } else {
                        panic("sofree: not queued");
                }
                soclrstate(so, SS_INCOMP);
                so->so_head = NULL;
                lwkt_relpooltoken(head);
        }
        ssb_release(&so->so_snd, so);
        sorflush(so);
        sodealloc(so);
}
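
/*
 * Reference model used above (summary): soalloc() returns a socket with
 * so_refs == 1 and SS_NOFDREF set.  socreate() converts that reference
 * into the file-descriptor reference by clearing SS_NOFDREF, and
 * sodiscard() sets it again when the last file reference goes away.
 * sofree() tears the socket down only once the last so_refs reference
 * is dropped and the protocol has detached so_pcb, so either the file
 * layer or the protocol stack can keep the socket alive.
 */
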
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
        int error;

        funsetown(&so->so_sigio);
        if (!use_soclose_fast ||
            (so->so_proto->pr_flags & PR_SYNC_PORT) ||
            (so->so_options & SO_LINGER)) {
                error = soclose_sync(so, fflag);
        } else {
                soclose_fast(so);
                error = 0;
        }
        return error;
}

static void
sodiscard(struct socket *so)
{
        lwkt_getpooltoken(so);
        if (so->so_options & SO_ACCEPTCONN) {
                struct socket *sp;

                while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
                        TAILQ_REMOVE(&so->so_incomp, sp, so_list);
                        soclrstate(sp, SS_INCOMP);
                        sp->so_head = NULL;
                        so->so_incqlen--;
                        soaborta(sp);
                }
                while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
                        TAILQ_REMOVE(&so->so_comp, sp, so_list);
                        soclrstate(sp, SS_COMP);
                        sp->so_head = NULL;
                        so->so_qlen--;
                        soaborta(sp);
                }
        }
        lwkt_relpooltoken(so);

        if (so->so_state & SS_NOFDREF)
                panic("soclose: NOFDREF");
        sosetstate(so, SS_NOFDREF);     /* take ref */
}

static int
soclose_sync(struct socket *so, int fflag)
{
        int error = 0;

        if (so->so_pcb == NULL)
                goto discard;
        if (so->so_state & SS_ISCONNECTED) {
                if ((so->so_state & SS_ISDISCONNECTING) == 0) {
                        error = sodisconnect(so);
                        if (error)
                                goto drop;
                }
                if (so->so_options & SO_LINGER) {
                        if ((so->so_state & SS_ISDISCONNECTING) &&
                            (fflag & FNONBLOCK))
                                goto drop;
                        while (so->so_state & SS_ISCONNECTED) {
                                error = tsleep(&so->so_timeo, PCATCH,
                                    "soclos", so->so_linger * hz);
                                if (error)
                                        break;
                        }
                }
        }
drop:
        if (so->so_pcb) {
                int error2;

                error2 = so_pru_detach(so);
                if (error == 0)
                        error = error2;
        }
discard:
        sodiscard(so);
        so_pru_sync(so);        /* unpend async sending */
        sofree(so);             /* dispose of ref */

        return (error);
}

static void
soclose_sofree_async_handler(netmsg_t msg)
{
        sofree(msg->base.nm_so);
}

static void
soclose_sofree_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_sofree_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_disconn_async_handler(netmsg_t msg)
{
        struct socket *so = msg->base.nm_so;

        if ((so->so_state & SS_ISCONNECTED) &&
            (so->so_state & SS_ISDISCONNECTING) == 0)
                so_pru_disconnect_direct(so);

        if (so->so_pcb)
                so_pru_detach_direct(so);

        sodiscard(so);
        sofree(so);
}

static void
soclose_disconn_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_disconn_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_detach_async_handler(netmsg_t msg)
{
        struct socket *so = msg->base.nm_so;

        if (so->so_pcb)
                so_pru_detach_direct(so);

        sodiscard(so);
        sofree(so);
}

static void
soclose_detach_async(struct socket *so)
{
        struct netmsg_base *base = &so->so_clomsg;

        netmsg_init(base, so, &netisr_apanic_rport, 0,
            soclose_detach_async_handler);
        lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_fast(struct socket *so)
{
        if (so->so_pcb == NULL)
                goto discard;

        if ((so->so_state & SS_ISCONNECTED) &&
            (so->so_state & SS_ISDISCONNECTING) == 0) {
                soclose_disconn_async(so);
                return;
        }

        if (so->so_pcb) {
                soclose_detach_async(so);
                return;
        }

discard:
        sodiscard(so);
        soclose_sofree_async(so);
}
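
/*
 * Fast-close flow (summary): soclose_fast() never blocks in the caller.
 * Depending on socket state it dispatches one netmsg to the socket's
 * protocol thread, which then runs disconnect and/or detach directly
 * (the *_direct() forms) before discarding and freeing the socket:
 *
 *	connected	-> soclose_disconn_async()
 *	pcb only	-> soclose_detach_async()
 *	no pcb		-> sodiscard() here, sofree() via netmsg
 *
 * Sockets needing lingering semantics or a synchronous protocol port
 * take the soclose_sync() path instead (see soclose()).
 */
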
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort(struct socket *so)
{
        soreference(so);
        so_pru_abort(so);
}

void
soaborta(struct socket *so)
{
        soreference(so);
        so_pru_aborta(so);
}

void
soabort_oncpu(struct socket *so)
{
        soreference(so);
        so_pru_abort_oncpu(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
        if ((so->so_state & SS_NOFDREF) == 0)
                panic("soaccept: !NOFDREF");
        soclrstate(so, SS_NOFDREF);     /* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
        int error;

        soaccept_generic(so);
        error = so_pru_accept(so, nam);
        return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        int error;

        if (so->so_options & SO_ACCEPTCONN)
                return (EOPNOTSUPP);
        /*
         * If protocol is connection-based, can only connect once.
         * Otherwise, if connected, try to disconnect first.
         * This allows user to disconnect by connecting to, e.g.,
         * a null address.
         */
        if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
            ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
            (error = sodisconnect(so)))) {
                error = EISCONN;
        } else {
                /*
                 * Prevent accumulated error from previous connection
                 * from biting us.
                 */
                so->so_error = 0;
                error = so_pru_connect(so, nam, td);
        }
        return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
        int error;

        error = so_pru_connect2(so1, so2);
        return (error);
}

int
sodisconnect(struct socket *so)
{
        int error;

        if ((so->so_state & SS_ISCONNECTED) == 0) {
                error = ENOTCONN;
                goto bad;
        }
        if (so->so_state & SS_ISDISCONNECTING) {
                error = EALREADY;
                goto bad;
        }
        error = so_pru_disconnect(so);
bad:
        return (error);
}
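
/*
 * Note on the "null address" idiom referenced above: for datagram
 * sockets, userland traditionally dissolves an association by calling
 * connect(2) with an address whose family is AF_UNSPEC.  That reaches
 * soconnect() on a connected socket and triggers the sodisconnect()
 * path before the new (typically no-op or failing) connect is
 * attempted.
 */
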
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
        struct mbuf **mp;
        struct mbuf *m;
        size_t resid;
        int space, len;
        int clen = 0, error, dontroute, mlen;
        int atomic = sosendallatonce(so) || top;
        int pru_flags;

        if (uio) {
                resid = uio->uio_resid;
        } else {
                resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
                len = 0;
                for (m = top; m; m = m->m_next)
                        len += m->m_len;
                KKASSERT(top->m_pkthdr.len == len);
#endif
        }

        /*
         * WARNING!  resid is unsigned, space and len are signed.  space
         *	     can wind up negative if the sockbuf is overcommitted.
         *
         * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
         * type sockets since that's an error.
         */
        if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
                error = EINVAL;
                goto out;
        }

        dontroute =
            (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
            (so->so_proto->pr_flags & PR_ATOMIC);
        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;
        if (control)
                clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        do {
                if (so->so_state & SS_CANTSENDMORE)
                        gotoerr(EPIPE);
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        goto release;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        /*
                         * `sendto' and `sendmsg' are allowed on a connection-
                         * based socket if it supports implied connect.
                         * Return ENOTCONN if not connected and no address is
                         * supplied.
                         */
                        if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
                            (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
                                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                                    !(resid == 0 && clen != 0))
                                        gotoerr(ENOTCONN);
                        } else if (addr == NULL)
                                gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
                                    ENOTCONN : EDESTADDRREQ);
                }
                if ((atomic && resid > so->so_snd.ssb_hiwat) ||
                    clen > so->so_snd.ssb_hiwat) {
                        gotoerr(EMSGSIZE);
                }
                space = ssb_space(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if ((space < 0 || (size_t)space < resid + clen) && uio &&
                    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
                        if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                                gotoerr(EWOULDBLOCK);
                        ssb_unlock(&so->so_snd);
                        error = ssb_wait(&so->so_snd);
                        if (error)
                                goto out;
                        goto restart;
                }
                mp = &top;
                space -= clen;
                do {
                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                                if (flags & MSG_EOR)
                                        top->m_flags |= M_EOR;
                        } else do {
                                if (resid > INT_MAX)
                                        resid = INT_MAX;
                                m = m_getl((int)resid, MB_WAIT, MT_DATA,
                                    top == NULL ? M_PKTHDR : 0, &mlen);
                                if (top == NULL) {
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = NULL;
                                }
                                len = imin((int)szmin(mlen, resid), space);
                                if (resid < MINCLSIZE) {
                                        /*
                                         * For datagram protocols, leave room
                                         * for protocol headers in first mbuf.
                                         */
                                        if (atomic && top == NULL && len < mlen)
                                                MH_ALIGN(m, len);
                                }
                                space -= len;
                                error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid == 0) {
                                        if (flags & MSG_EOR)
                                                top->m_flags |= M_EOR;
                                        break;
                                }
                        } while (space > 0 && atomic);
                        if (dontroute)
                                so->so_options |= SO_DONTROUTE;
                        if (flags & MSG_OOB) {
                                pru_flags = PRUS_OOB;
                        } else if ((flags & MSG_EOF) &&
                            (so->so_proto->pr_flags & PR_IMPLOPCL) &&
                            (resid == 0)) {
                                /*
                                 * If the user set MSG_EOF, the protocol
                                 * understands this flag and nothing left to
                                 * send then use PRU_SEND_EOF instead of PRU_SEND.
                                 */
                                pru_flags = PRUS_EOF;
                        } else if (resid > 0 && space > 0) {
                                /* If there is more to send, set PRUS_MORETOCOME */
                                pru_flags = PRUS_MORETOCOME;
                        } else {
                                pru_flags = 0;
                        }
                        /*
                         * XXX all the SS_CANTSENDMORE checks previously
                         * done could be out of date.  We could have received
                         * a reset packet in an interrupt or maybe we slept
                         * while doing page faults in uiomove() etc.  We could
                         * probably recheck again inside the splnet() protection
                         * here, but there are probably other places that this
                         * also happens.  We must rethink this.
                         */
                        error = so_pru_send(so, pru_flags, top, addr, control, td);
                        if (dontroute)
                                so->so_options &= ~SO_DONTROUTE;
                        clen = 0;
                        control = NULL;
                        top = NULL;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
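
/*
 * Illustrative sketch (not from the original source): how an in-kernel
 * caller might feed a flat buffer through sosend() on a connected
 * stream socket.  The uio setup follows the usual BSD kernel
 * conventions; error handling and socket setup are elided.
 */
#if 0
static int
sosend_buf_sketch(struct socket *so, void *buf, size_t nbytes,
    struct thread *td)
{
        struct iovec iov;
        struct uio auio;

        iov.iov_base = buf;
        iov.iov_len = nbytes;
        auio.uio_iov = &iov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = 0;
        auio.uio_resid = nbytes;
        auio.uio_segflg = UIO_SYSSPACE; /* data lives in kernel space */
        auio.uio_rw = UIO_WRITE;
        auio.uio_td = td;

        /* No destination address or control data on a connected socket. */
        return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif
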

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
        size_t resid;
        int error, pru_flags = 0;
        int space;

        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;
        if (control)
                m_freem(control);

        KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
        resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        if (so->so_state & SS_CANTSENDMORE)
                gotoerr(EPIPE);
        if (so->so_error) {
                error = so->so_error;
                so->so_error = 0;
                goto release;
        }
        if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
                gotoerr(EDESTADDRREQ);
        if (resid > so->so_snd.ssb_hiwat)
                gotoerr(EMSGSIZE);
        space = ssb_space(&so->so_snd);
        if (uio && (space < 0 || (size_t)space < resid)) {
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                        gotoerr(EWOULDBLOCK);
                ssb_unlock(&so->so_snd);
                error = ssb_wait(&so->so_snd);
                if (error)
                        goto out;
                goto restart;
        }

        if (uio) {
                int hdrlen = max_hdr;

                /*
                 * We try to optimize out the additional mbuf
                 * allocations in M_PREPEND() on output path, e.g.
                 * - udp_output(), when it tries to prepend protocol
                 *   headers.
                 * - Link layer output function, when it tries to
                 *   prepend link layer header.
                 *
                 * This probably will not benefit any data that will
                 * be fragmented, so this optimization is only performed
                 * when the size of data and max size of protocol+link
                 * headers fit into one mbuf cluster.
                 */
                if (uio->uio_resid > MCLBYTES - hdrlen ||
                    !udp_sosend_prepend) {
                        top = m_uiomove(uio);
                        if (top == NULL)
                                goto release;
                } else {
                        int nsize;

                        top = m_getl(uio->uio_resid + hdrlen, MB_WAIT,
                            MT_DATA, M_PKTHDR, &nsize);
                        KASSERT(nsize >= uio->uio_resid + hdrlen,
                            ("sosendudp invalid nsize %d, "
                             "resid %zu, hdrlen %d",
                             nsize, uio->uio_resid, hdrlen));

                        top->m_len = uio->uio_resid;
                        top->m_pkthdr.len = uio->uio_resid;
                        top->m_data += hdrlen;

                        error = uiomove(mtod(top, caddr_t), top->m_len, uio);
                        if (error)
                                goto out;
                }
        }

        if (flags & MSG_DONTROUTE)
                pru_flags |= PRUS_DONTROUTE;

        if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
                so_pru_send_async(so, pru_flags, top, addr, NULL, td);
                error = 0;
        } else {
                error = so_pru_send(so, pru_flags, top, addr, NULL, td);
        }
        top = NULL;     /* sent or freed in lower layer */

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        return (error);
}
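
/*
 * Layout produced by the prepend optimization above (sketch): the
 * datagram is copied into a single cluster mbuf with max_hdr bytes of
 * leading space reserved, so later M_PREPEND() calls can extend
 * m_data backwards instead of allocating a separate header mbuf:
 *
 *	|<- max_hdr ->|<------ payload (uio_resid) ------>|
 *	^buffer start ^m_data                             ^m_data+m_len
 */
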

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
        struct mbuf **mp;
        struct mbuf *m;
        size_t resid;
        int space, len;
        int error, mlen;
        int allatonce;
        int pru_flags;

        if (uio) {
                KKASSERT(top == NULL);
                allatonce = 0;
                resid = uio->uio_resid;
        } else {
                allatonce = 1;
                resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
                len = 0;
                for (m = top; m; m = m->m_next)
                        len += m->m_len;
                KKASSERT(top->m_pkthdr.len == len);
#endif
        }

        /*
         * WARNING!  resid is unsigned, space and len are signed.  space
         *	     can wind up negative if the sockbuf is overcommitted.
         *
         * Also check to make sure that MSG_EOR isn't used on TCP
         */
        if (flags & MSG_EOR) {
                error = EINVAL;
                goto out;
        }

        if (control) {
                /* TCP doesn't do control messages (rights, creds, etc) */
                if (control->m_len) {
                        error = EINVAL;
                        goto out;
                }
                m_freem(control);       /* empty control, just free it */
                control = NULL;
        }

        if (td->td_lwp != NULL)
                td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
        error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
        if (error)
                goto out;

        do {
                if (so->so_state & SS_CANTSENDMORE)
                        gotoerr(EPIPE);
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        goto release;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0 &&
                    (so->so_state & SS_ISCONFIRMING) == 0)
                        gotoerr(ENOTCONN);
                if (allatonce && resid > so->so_snd.ssb_hiwat)
                        gotoerr(EMSGSIZE);

                space = ssb_space_prealloc(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if ((space < 0 || (size_t)space < resid) && !allatonce &&
                    space < so->so_snd.ssb_lowat) {
                        if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
                                gotoerr(EWOULDBLOCK);
                        ssb_unlock(&so->so_snd);
                        error = ssb_wait(&so->so_snd);
                        if (error)
                                goto out;
                        goto restart;
                }
                mp = &top;
                do {
                        int cnt = 0, async = 0;

                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                        } else do {
                                if (resid > INT_MAX)
                                        resid = INT_MAX;
                                m = m_getl((int)resid, MB_WAIT, MT_DATA,
                                    top == NULL ? M_PKTHDR : 0, &mlen);
                                if (top == NULL) {
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = NULL;
                                }
                                len = imin((int)szmin(mlen, resid), space);
                                space -= len;
                                error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid == 0)
                                        break;
                                ++cnt;
                        } while (space > 0 && cnt < tcp_sosend_agglim);

                        if (tcp_sosend_async)
                                async = 1;

                        if (flags & MSG_OOB) {
                                pru_flags = PRUS_OOB;
                                async = 0;
                        } else if ((flags & MSG_EOF) && resid == 0) {
                                pru_flags = PRUS_EOF;
                        } else if (resid > 0 && space > 0) {
                                /* If there is more to send, set PRUS_MORETOCOME */
                                pru_flags = PRUS_MORETOCOME;
                                async = 1;
                        } else {
                                pru_flags = 0;
                        }

                        if (flags & MSG_SYNC)
                                async = 0;

                        /*
                         * XXX all the SS_CANTSENDMORE checks previously
                         * done could be out of date.  We could have received
                         * a reset packet in an interrupt or maybe we slept
                         * while doing page faults in uiomove() etc.  We could
                         * probably recheck again inside the splnet() protection
                         * here, but there are probably other places that this
                         * also happens.  We must rethink this.
                         */
                        for (m = top; m; m = m->m_next)
                                ssb_preallocstream(&so->so_snd, m);
                        if (!async) {
                                error = so_pru_send(so, pru_flags, top,
                                    NULL, NULL, td);
                        } else {
                                so_pru_send_async(so, pru_flags, top,
                                    NULL, NULL, td);
                                error = 0;
                        }

                        top = NULL;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        ssb_unlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
#endif
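
/*
 * Aggregation note for sosendtcp() above (summary): the uio copy loop
 * batches up to tcp_sosend_agglim mbufs per pru_send call, and sends
 * asynchronously (so_pru_send_async) when tcp_sosend_async is enabled
 * or more data is known to follow (PRUS_MORETOCOME).  MSG_OOB and
 * MSG_SYNC force the synchronous path so the caller sees the error.
 */
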

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
        struct mbuf *m, *n;
        struct mbuf *free_chain = NULL;
        int flags, len, error, offset;
        struct protosw *pr = so->so_proto;
        int moff, type = 0;
        size_t resid, orig_resid;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = (size_t)(sio->sb_climit - sio->sb_cc);
        orig_resid = resid;

        if (psa)
                *psa = NULL;
        if (controlp)
                *controlp = NULL;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(MB_WAIT, MT_DATA);
                if (m == NULL)
                        return (ENOBUFS);
                error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
                if (error)
                        goto bad;
                if (sio) {
                        do {
                                sbappend(sio, m);
                                KKASSERT(resid >= (size_t)m->m_len);
                                resid -= (size_t)m->m_len;
                        } while (resid > 0 && m);
                } else {
                        do {
                                uio->uio_resid = resid;
                                error = uiomove(mtod(m, caddr_t),
                                    (int)szmin(resid, m->m_len),
                                    uio);
                                resid = uio->uio_resid;
                                m = m_free(m);
                        } while (uio->uio_resid && error == 0 && m);
                }
bad:
                if (m)
                        m_freem(m);
                return (error);
        }
        if ((so->so_state & SS_ISCONFIRMING) && resid)
                so_pru_rcvd(so, 0);

        /*
         * The token interlocks against the protocol thread while
         * ssb_lock is a blocking lock against other userland entities.
         */
        lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
        error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
        if (error)
                goto done;

        m = so->so_rcv.ssb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *	receive operation at once if we block (resid <= hiwat).
         *   3. MSG_DONTWAIT is not set
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
            (size_t)so->so_rcv.ssb_cc < resid) &&
            (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
            ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
            m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
                KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                for (; m; m = m->m_next) {
                        if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                                m = so->so_rcv.ssb_mb;
                                goto dontblock;
                        }
                }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (pr->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (resid == 0)
                        goto release;
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                ssb_unlock(&so->so_rcv);
                error = ssb_wait(&so->so_rcv);
                if (error)
                        goto done;
                goto restart;
        }
dontblock:
        if (uio && uio->uio_td && uio->uio_td->td_proc)
                uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

        /*
         * note: m should be == sb_mb here.  Cache the next record while
         * cleaning up.  Note that calling m_free*() will break out critical
         * section.
         */
        KKASSERT(m == so->so_rcv.ssb_mb);

        /*
         * Skip any address mbufs prepending the record.
         */
        if (pr->pr_flags & PR_ADDR) {
                KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
                orig_resid = 0;
                if (psa)
                        *psa = dup_sockaddr(mtod(m, struct sockaddr *));
                if (flags & MSG_PEEK)
                        m = m->m_next;
                else
                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
        }

        /*
         * Skip any control mbufs prepending the record.
         */
#ifdef SCTP
        if (pr->pr_flags & PR_ADDR_OPT) {
                /*
                 * For SCTP we may be getting a
                 * whole message OR a partial delivery.
                 */
                if (m && m->m_type == MT_SONAME) {
                        orig_resid = 0;
                        if (psa)
                                *psa = dup_sockaddr(mtod(m, struct sockaddr *));
                        if (flags & MSG_PEEK)
                                m = m->m_next;
                        else
                                m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                }
        }
#endif /* SCTP */
        while (m && m->m_type == MT_CONTROL && error == 0) {
                if (flags & MSG_PEEK) {
                        if (controlp)
                                *controlp = m_copy(m, 0, m->m_len);
                        m = m->m_next;  /* XXX race */
                } else {
                        if (controlp) {
                                n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                if (pr->pr_domain->dom_externalize &&
                                    mtod(m, struct cmsghdr *)->cmsg_type ==
                                    SCM_RIGHTS)
                                        error = (*pr->pr_domain->dom_externalize)(m);
                                *controlp = m;
                                m = n;
                        } else {
                                m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                        }
                }
                if (controlp && *controlp) {
                        orig_resid = 0;
                        controlp = &(*controlp)->m_next;
                }
        }

        /*
         * flag OOB data.
         */
        if (m) {
                type = m->m_type;
                if (type == MT_OOBDATA)
                        flags |= MSG_OOB;
        }

        /*
         * Copy to the UIO or mbuf return chain (*mp).
         */
        moff = 0;
        offset = 0;
        while (m && resid > 0 && error == 0) {
                if (m->m_type == MT_OOBDATA) {
                        if (type != MT_OOBDATA)
                                break;
                } else if (type == MT_OOBDATA)
                        break;
                else
                        KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                            ("receive 3"));
                soclrstate(so, SS_RCVATMARK);
                len = (resid > INT_MAX) ? INT_MAX : resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;

                /*
                 * Copy out to the UIO or pass the mbufs back to the SIO.
                 * The SIO is dealt with when we eat the mbuf, but deal
                 * with the resid here either way.
                 */
                if (uio) {
                        uio->uio_resid = resid;
                        error = uiomove(mtod(m, caddr_t) + moff, len, uio);
                        resid = uio->uio_resid;
                        if (error)
                                goto release;
                } else {
                        resid -= (size_t)len;
                }

                /*
                 * Eat the entire mbuf or just a piece of it
                 */
                if (len == m->m_len - moff) {
                        if (m->m_flags & M_EOR)
                                flags |= MSG_EOR;
#ifdef SCTP
                        if (m->m_flags & M_NOTIFICATION)
                                flags |= MSG_NOTIFICATION;
#endif /* SCTP */
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                if (sio) {
                                        n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                        sbappend(sio, m);
                                        m = n;
                                } else {
                                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                                }
                        }
                } else {
                        if (flags & MSG_PEEK) {
                                moff += len;
                        } else {
                                if (sio) {
                                        n = m_copym(m, 0, len, MB_WAIT);
                                        if (n)
                                                sbappend(sio, n);
                                }
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.ssb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        sosetstate(so, SS_RCVATMARK);
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                if (flags & MSG_EOR)
                        break;
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until resid == 0 or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep signalsockbuf locked against other readers.
                 */
                while ((flags & MSG_WAITALL) && m == NULL &&
                    resid > 0 && !sosendallatonce(so) &&
                    so->so_rcv.ssb_mb == NULL) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        /*
                         * The window might have closed to zero, make
                         * sure we send an ack now that we've drained
                         * the buffer or we might end up blocking until
                         * the idle takes over (5 seconds).
                         */
                        if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
                                so_pru_rcvd(so, flags);
                        error = ssb_wait(&so->so_rcv);
                        if (error) {
                                ssb_unlock(&so->so_rcv);
                                error = 0;
                                goto done;
                        }
                        m = so->so_rcv.ssb_mb;
                }
        }

        /*
         * If an atomic read was requested but unread data still remains
         * in the record, set MSG_TRUNC.
         */
        if (m && pr->pr_flags & PR_ATOMIC)
                flags |= MSG_TRUNC;

        /*
         * Cleanup.  If an atomic read was requested drop any unread data.
         */
        if ((flags & MSG_PEEK) == 0) {
                if (m && (pr->pr_flags & PR_ATOMIC))
                        sbdroprecord(&so->so_rcv.sb);
                if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
                        so_pru_rcvd(so, flags);
        }

        if (orig_resid == resid && orig_resid &&
            (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
                ssb_unlock(&so->so_rcv);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
release:
        ssb_unlock(&so->so_rcv);
done:
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (free_chain)
                m_freem(free_chain);
        return (error);
}
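
/*
 * Record layout consumed by soreceive() above (sketch), as assembled by
 * the sbappend*() routines for a PR_ADDR/PR_ATOMIC protocol:
 *
 *	MT_SONAME -> [MT_CONTROL ...] -> MT_DATA -> ... -> MT_DATA (M_EOR)
 *	 (address)    (ancillary data)   (payload mbufs, linked via m_next)
 *
 * Records themselves are chained through m_nextpkt; stream protocols
 * normally present a single record of MT_DATA mbufs.
 */
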

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
        struct mbuf *m, *n;
        struct mbuf *free_chain = NULL;
        int flags, len, error, offset;
        struct protosw *pr = so->so_proto;
        int moff;
        size_t resid, orig_resid;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = (size_t)(sio->sb_climit - sio->sb_cc);
        orig_resid = resid;

        if (psa)
                *psa = NULL;
        if (controlp)
                *controlp = NULL;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(MB_WAIT, MT_DATA);
                if (m == NULL)
                        return (ENOBUFS);
                error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
                if (error)
                        goto bad;
                if (sio) {
                        do {
                                sbappend(sio, m);
                                KKASSERT(resid >= (size_t)m->m_len);
                                resid -= (size_t)m->m_len;
                        } while (resid > 0 && m);
                } else {
                        do {
                                uio->uio_resid = resid;
                                error = uiomove(mtod(m, caddr_t),
                                    (int)szmin(resid, m->m_len),
                                    uio);
                                resid = uio->uio_resid;
                                m = m_free(m);
                        } while (uio->uio_resid && error == 0 && m);
                }
bad:
                if (m)
                        m_freem(m);
                return (error);
        }

        /*
         * The token interlocks against the protocol thread while
         * ssb_lock is a blocking lock against other userland entities.
         */
        lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
        error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
        if (error)
                goto done;

        m = so->so_rcv.ssb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *	receive operation at once if we block (resid <= hiwat).
         *   3. MSG_DONTWAIT is not set
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
            (size_t)so->so_rcv.ssb_cc < resid) &&
            (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
            ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
                KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (pr->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (resid == 0)
                        goto release;
                if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                ssb_unlock(&so->so_rcv);
                error = ssb_wait(&so->so_rcv);
                if (error)
                        goto done;
                goto restart;
        }
dontblock:
        if (uio && uio->uio_td && uio->uio_td->td_proc)
                uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

        /*
         * note: m should be == sb_mb here.  Cache the next record while
         * cleaning up.  Note that calling m_free*() will break out critical
         * section.
         */
        KKASSERT(m == so->so_rcv.ssb_mb);

        /*
         * Copy to the UIO or mbuf return chain (*mp).
         */
        moff = 0;
        offset = 0;
        while (m && resid > 0 && error == 0) {
                KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                    ("receive 3"));

                soclrstate(so, SS_RCVATMARK);
                len = (resid > INT_MAX) ? INT_MAX : resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;

                /*
                 * Copy out to the UIO or pass the mbufs back to the SIO.
                 * The SIO is dealt with when we eat the mbuf, but deal
                 * with the resid here either way.
                 */
                if (uio) {
                        uio->uio_resid = resid;
                        error = uiomove(mtod(m, caddr_t) + moff, len, uio);
                        resid = uio->uio_resid;
                        if (error)
                                goto release;
                } else {
                        resid -= (size_t)len;
                }

                /*
                 * Eat the entire mbuf or just a piece of it
                 */
                if (len == m->m_len - moff) {
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                if (sio) {
                                        n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
                                        sbappend(sio, m);
                                        m = n;
                                } else {
                                        m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
                                }
                        }
                } else {
                        if (flags & MSG_PEEK) {
                                moff += len;
                        } else {
                                if (sio) {
                                        n = m_copym(m, 0, len, MB_WAIT);
                                        if (n)
                                                sbappend(sio, n);
                                }
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.ssb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        sosetstate(so, SS_RCVATMARK);
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until resid == 0 or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep signalsockbuf locked against other readers.
                 */
                while ((flags & MSG_WAITALL) && m == NULL &&
                    resid > 0 && !sosendallatonce(so) &&
                    so->so_rcv.ssb_mb == NULL) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        /*
                         * The window might have closed to zero, make
                         * sure we send an ack now that we've drained
                         * the buffer or we might end up blocking until
                         * the idle takes over (5 seconds).
                         */
                        if (so->so_pcb)
                                so_pru_rcvd_async(so);
                        error = ssb_wait(&so->so_rcv);
                        if (error) {
                                ssb_unlock(&so->so_rcv);
                                error = 0;
                                goto done;
                        }
                        m = so->so_rcv.ssb_mb;
                }
        }

        /*
         * Cleanup.  If an atomic read was requested drop any unread data.
         */
        if ((flags & MSG_PEEK) == 0) {
                if (so->so_pcb)
                        so_pru_rcvd_async(so);
        }

        if (orig_resid == resid && orig_resid &&
            (so->so_state & SS_CANTRCVMORE) == 0) {
                ssb_unlock(&so->so_rcv);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
release:
        ssb_unlock(&so->so_rcv);
done:
        lwkt_reltoken(&so->so_rcv.ssb_token);
        if (free_chain)
                m_freem(free_chain);
        return (error);
}
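
/*
 * sorecvtcp() above is a trimmed variant of soreceive() (summary): it
 * assumes a stream protocol, so the MT_OOBDATA record typing, PR_ADDR
 * address mbufs, MT_CONTROL handling and MSG_TRUNC/PR_ATOMIC cleanup
 * are all dropped, and window updates are issued asynchronously via
 * so_pru_rcvd_async() instead of the synchronous so_pru_rcvd().
 */
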

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
        if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
                return (EINVAL);

        if (how != SHUT_WR) {
                /*ssb_lock(&so->so_rcv, M_WAITOK);*/
                sorflush(so);
                /*ssb_unlock(&so->so_rcv);*/
        }
        if (how != SHUT_RD)
                return (so_pru_shutdown(so));
        return (0);
}

void
sorflush(struct socket *so)
{
        struct signalsockbuf *ssb = &so->so_rcv;
        struct protosw *pr = so->so_proto;
        struct signalsockbuf asb;

        atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

        lwkt_gettoken(&ssb->ssb_token);
        socantrcvmore(so);
        asb = *ssb;

        /*
         * Can't just blow up the ssb structure here
         */
        bzero(&ssb->sb, sizeof(ssb->sb));
        ssb->ssb_timeo = 0;
        ssb->ssb_lowat = 0;
        ssb->ssb_hiwat = 0;
        ssb->ssb_mbmax = 0;
        atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

        if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
                (*pr->pr_domain->dom_dispose)(asb.ssb_mb);
        ssb_release(&asb, so);

        lwkt_reltoken(&ssb->ssb_token);
}
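
/*
 * Why sorflush() copies the signalsockbuf (summary): the live ssb is
 * reset under the token so new appends see an empty buffer, while the
 * snapshot in asb still owns the old mbuf chain.  Disposal of any
 * in-flight rights (dom_dispose) and the actual release then operate
 * on the snapshot, outside the socket's visible state.
 */
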

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
        struct accept_filter_arg *afap = NULL;
        struct accept_filter *afp;
        struct so_accf *af = so->so_accf;
        int error = 0;

        /* do not set/remove accept filters on non listen sockets */
        if ((so->so_options & SO_ACCEPTCONN) == 0) {
                error = EINVAL;
                goto out;
        }

        /* removing the filter */
        if (sopt == NULL) {
                if (af != NULL) {
                        if (af->so_accept_filter != NULL &&
                            af->so_accept_filter->accf_destroy != NULL) {
                                af->so_accept_filter->accf_destroy(so);
                        }
                        if (af->so_accept_filter_str != NULL) {
                                kfree(af->so_accept_filter_str, M_ACCF);
                        }
                        kfree(af, M_ACCF);
                        so->so_accf = NULL;
                }
                so->so_options &= ~SO_ACCEPTFILTER;
                return (0);
        }
        /* adding a filter */
        /* must remove previous filter first */
        if (af != NULL) {
                error = EINVAL;
                goto out;
        }
        /* don't put large objects on the kernel stack */
        afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
        error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
        afap->af_name[sizeof(afap->af_name)-1] = '\0';
        afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
        if (error)
                goto out;
        afp = accept_filt_get(afap->af_name);
        if (afp == NULL) {
                error = ENOENT;
                goto out;
        }
        af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
        if (afp->accf_create != NULL) {
                if (afap->af_name[0] != '\0') {
                        int len = strlen(afap->af_name) + 1;

                        af->so_accept_filter_str = kmalloc(len, M_ACCF,
                            M_WAITOK);
                        strcpy(af->so_accept_filter_str, afap->af_name);
                }
                af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
                if (af->so_accept_filter_arg == NULL) {
                        kfree(af->so_accept_filter_str, M_ACCF);
                        kfree(af, M_ACCF);
                        so->so_accf = NULL;
                        error = EINVAL;
                        goto out;
                }
        }
        af->so_accept_filter = afp;
        so->so_accf = af;
        so->so_options |= SO_ACCEPTFILTER;
out:
        if (afap != NULL)
                kfree(afap, M_TEMP);
        return (error);
}
#endif /* INET */
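
/*
 * Userland view of the setter above (illustrative, assuming the
 * standard BSD accept-filter interface and a loaded filter module
 * such as "dataready"):
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * The socket must already be listening, and passing a NULL optval
 * removes the filter.
 */
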

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
        return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
        size_t valsize;

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
        KKASSERT(kva_p(buf));

        /*
         * If the user gives us more than we wanted, we ignore it,
         * but if we don't get the minimum length the caller
         * wants, we return EINVAL.  On success, sopt->sopt_valsize
         * is set to however much we actually retrieved.
         */
        if ((valsize = sopt->sopt_valsize) < minlen)
                return EINVAL;
        if (valsize > len)
                sopt->sopt_valsize = valsize = len;

        bcopy(sopt->sopt_val, buf, valsize);
        return 0;
}
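
/*
 * Minimal sketch (hypothetical handler, not part of this file): how a
 * protocol-level option handler typically pairs sooptcopyin() on
 * SOPT_SET with sooptcopyout() on SOPT_GET for a simple integer-valued
 * option.
 */
#if 0
static int
example_ctloutput_sketch(struct socket *so, struct sockopt *sopt)
{
        int optval = 0;
        int error;

        if (sopt->sopt_dir == SOPT_SET) {
                /* Accept exactly an int; shorter input yields EINVAL. */
                error = sooptcopyin(sopt, &optval, sizeof(optval),
                    sizeof(optval));
                if (error)
                        return (error);
                /* ... apply optval to protocol state here ... */
                return (0);
        }
        /* SOPT_GET: return the value, truncated to the user's buffer. */
        return (sooptcopyout(sopt, &optval, sizeof(optval)));
}
#endif
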

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
        int error, optval;
        struct linger l;
        struct timeval tv;
        u_long val;
        struct signalsockbuf *sotmp;

        error = 0;
        sopt->sopt_dir = SOPT_SET;
        if (sopt->sopt_level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput) {
                        return (so_pr_ctloutput(so, sopt));
                }
                error = ENOPROTOOPT;
        } else {
                switch (sopt->sopt_name) {
#ifdef INET
                case SO_ACCEPTFILTER:
                        error = do_setopt_accept_filter(so, sopt);
                        if (error)
                                goto bad;
                        break;
#endif /* INET */
                case SO_LINGER:
                        error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
                        if (error)
                                goto bad;

                        so->so_linger = l.l_linger;
                        if (l.l_onoff)
                                so->so_options |= SO_LINGER;
                        else
                                so->so_options &= ~SO_LINGER;
                        break;

                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_DONTROUTE:
                case SO_USELOOPBACK:
                case SO_BROADCAST:
                case SO_REUSEADDR:
                case SO_REUSEPORT:
                case SO_OOBINLINE:
                case SO_TIMESTAMP:
                case SO_NOSIGPIPE:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                goto bad;
                        if (optval)
                                so->so_options |= sopt->sopt_name;
                        else
                                so->so_options &= ~sopt->sopt_name;
                        break;

                case SO_SNDBUF:
                case SO_RCVBUF:
                case SO_SNDLOWAT:
                case SO_RCVLOWAT:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                goto bad;

                        /*
                         * Values < 1 make no sense for any of these
                         * options, so disallow them.
                         */
                        if (optval < 1) {
                                error = EINVAL;
                                goto bad;
                        }

                        switch (sopt->sopt_name) {
                        case SO_SNDBUF:
                        case SO_RCVBUF:
                                if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
                                    &so->so_snd : &so->so_rcv, (u_long)optval,
                                    so,
                                    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
                                        error = ENOBUFS;
                                        goto bad;
                                }
                                sotmp = (sopt->sopt_name == SO_SNDBUF) ?
                                    &so->so_snd : &so->so_rcv;
                                atomic_clear_int(&sotmp->ssb_flags,
                                    SSB_AUTOSIZE);
                                break;

                        /*
                         * Make sure the low-water is never greater than
                         * the high-water.
                         */
                        case SO_SNDLOWAT:
                                so->so_snd.ssb_lowat =
                                    (optval > so->so_snd.ssb_hiwat) ?
                                    so->so_snd.ssb_hiwat : optval;
                                atomic_clear_int(&so->so_snd.ssb_flags,
                                    SSB_AUTOLOWAT);
                                break;
                        case SO_RCVLOWAT:
                                so->so_rcv.ssb_lowat =
                                    (optval > so->so_rcv.ssb_hiwat) ?
                                    so->so_rcv.ssb_hiwat : optval;
                                atomic_clear_int(&so->so_rcv.ssb_flags,
                                    SSB_AUTOLOWAT);
                                break;
                        }
                        break;

                case SO_SNDTIMEO:
                case SO_RCVTIMEO:
                        error = sooptcopyin(sopt, &tv, sizeof tv,
                            sizeof tv);
                        if (error)
                                goto bad;

                        /* assert(hz > 0); */
                        if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
                            tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
                                error = EDOM;
                                goto bad;
                        }
                        /* assert(tick > 0); */
                        /* assert(ULONG_MAX - INT_MAX >= 1000000); */
                        val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
                        if (val > INT_MAX) {
                                error = EDOM;
                                goto bad;
                        }
                        if (val == 0 && tv.tv_usec != 0)
                                val = 1;

                        switch (sopt->sopt_name) {
                        case SO_SNDTIMEO:
                                so->so_snd.ssb_timeo = val;
                                break;
                        case SO_RCVTIMEO:
                                so->so_rcv.ssb_timeo = val;
                                break;
                        }
                        break;
                default:
                        error = ENOPROTOOPT;
                        break;
                }
                if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
                        (void) so_pr_ctloutput(so, sopt);
                }
        }
bad:
        return (error);
}
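
/*
 * Worked example for the SO_SNDTIMEO/SO_RCVTIMEO conversion above
 * (illustrative numbers): with hz = 100 (so ustick = 10000 usec per
 * tick), a timeval of { tv_sec = 1, tv_usec = 500000 } converts to
 *
 *	val = 1 * 100 + 500000 / 10000 = 150 ticks
 *
 * and a nonzero sub-tick timeout such as { 0, 5000 } is rounded up
 * to 1 tick rather than silently becoming "no timeout".
 */
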

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
        soopt_from_kbuf(sopt, buf, len);
        return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
        size_t valsize;

        if (len == 0) {
                sopt->sopt_valsize = 0;
                return;
        }

        KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
        KKASSERT(kva_p(buf));

        /*
         * Documented get behavior is that we always return a value,
         * possibly truncated to fit in the user's buffer.
         * Traditional behavior is that we always tell the user
         * precisely how much we copied, rather than something useful
         * like the total amount we had available for her.
         * Note that this interface is not idempotent; the entire answer
         * must be generated ahead of time.
         */
        valsize = szmin(len, sopt->sopt_valsize);
        sopt->sopt_valsize = valsize;
        if (sopt->sopt_val != 0) {
                bcopy(buf, sopt->sopt_val, valsize);
        }
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
        int error, optval;
        long optval_l;
        struct linger l;
        struct timeval tv;
#ifdef INET
        struct accept_filter_arg *afap;
#endif

        error = 0;
        sopt->sopt_dir = SOPT_GET;
        if (sopt->sopt_level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput) {
                        return (so_pr_ctloutput(so, sopt));
                } else
                        return (ENOPROTOOPT);
        } else {
                switch (sopt->sopt_name) {
#ifdef INET
                case SO_ACCEPTFILTER:
                        if ((so->so_options & SO_ACCEPTCONN) == 0)
                                return (EINVAL);
                        afap = kmalloc(sizeof(*afap), M_TEMP,
                            M_WAITOK | M_ZERO);
                        if ((so->so_options & SO_ACCEPTFILTER) != 0) {
                                strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
                                if (so->so_accf->so_accept_filter_str != NULL)
                                        strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
                        }
                        error = sooptcopyout(sopt, afap, sizeof(*afap));
                        kfree(afap, M_TEMP);
                        break;
#endif /* INET */

                case SO_LINGER:
                        l.l_onoff = so->so_options & SO_LINGER;
                        l.l_linger = so->so_linger;
                        error = sooptcopyout(sopt, &l, sizeof l);
                        break;

                case SO_USELOOPBACK:
                case SO_DONTROUTE:
                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_REUSEADDR:
                case SO_REUSEPORT:
                case SO_BROADCAST:
                case SO_OOBINLINE:
                case SO_TIMESTAMP:
                case SO_NOSIGPIPE:
                        optval = so->so_options & sopt->sopt_name;
integer:
                        error = sooptcopyout(sopt, &optval, sizeof optval);
                        break;

                case SO_TYPE:
                        optval = so->so_type;
                        goto integer;

                case SO_ERROR:
                        optval = so->so_error;
                        so->so_error = 0;
                        goto integer;

                case SO_SNDBUF:
                        optval = so->so_snd.ssb_hiwat;
                        goto integer;

                case SO_RCVBUF:
                        optval = so->so_rcv.ssb_hiwat;
                        goto integer;

                case SO_SNDLOWAT:
                        optval = so->so_snd.ssb_lowat;
                        goto integer;

                case SO_RCVLOWAT:
                        optval = so->so_rcv.ssb_lowat;
                        goto integer;

                case SO_SNDTIMEO:
                case SO_RCVTIMEO:
                        optval = (sopt->sopt_name == SO_SNDTIMEO ?
                            so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

                        tv.tv_sec = optval / hz;
                        tv.tv_usec = (optval % hz) * ustick;
                        error = sooptcopyout(sopt, &tv, sizeof tv);
                        break;

                case SO_SNDSPACE:
                        optval_l = ssb_space(&so->so_snd);
                        error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
                        break;

                default:
                        error = ENOPROTOOPT;
                        break;
                }
                return (error);
        }
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
        struct mbuf *m, *m_prev;
        int sopt_size = sopt->sopt_valsize, msize;

        m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
            0, &msize);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = min(msize, sopt_size);
        sopt_size -= m->m_len;
        *mp = m;
        m_prev = m;
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
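
/*
 * Illustrative sketch (hypothetical handler, not original code): the
 * GET side mirrors soopt_to_kbuf().  A pr_ctloutput() handler builds
 * the complete answer in a kernel buffer first, then hands it to
 * sooptcopyout(), which truncates to the user's buffer size and
 * records the copied length in sopt->sopt_valsize:
 *
 *	int optval;
 *
 *	optval = (... some protocol state ...);
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 */
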
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l,
					     sizeof(optval_l));
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
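
/*
 * Usage sketch (illustrative): the mbuf-based compatibility helpers
 * below are meant to be paired with soopt_getm() when option data
 * has to be handed to legacy routines that expect an mbuf chain:
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);	  (allocate a chain)
 *	if (error)
 *		return (error);
 *	error = soopt_mcopyin(sopt, m);	  (fill it from sopt)
 *	if (error)
 *		return (error);
 *	(... pass m to the legacy routine, which consumes it ...)
 */
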
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* chain should have been allocated large enough at ip6_sooptmcopyin() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
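
/*
 * Illustrative userland counterpart (not kernel code): the filters
 * below back kevent(2) registrations on sockets.  For example,
 * assuming a socket descriptor s and a kqueue descriptor kq, this
 * arms filt_soread() with a 128-byte low-water mark via NOTE_LOWAT
 * (see the kn_sfflags test there):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */
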
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
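
/*
 * Illustrative note: for a listening socket, filt_solisten() reports
 * the completed-connection queue length in kn_data, so a kevent(2)
 * consumer can expect roughly kev.data accept(2) calls to succeed
 * without blocking, e.g.
 *
 *	n = kevent(kq, NULL, 0, &kev, 1, NULL);
 *	while (n == 1 && kev.data-- > 0)
 *		(void)accept(s, NULL, NULL);
 */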