1 /* 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 
61 * 62 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 63 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $ 64 */ 65 66 #include "opt_inet.h" 67 68 #include <sys/param.h> 69 #include <sys/systm.h> 70 #include <sys/fcntl.h> 71 #include <sys/malloc.h> 72 #include <sys/mbuf.h> 73 #include <sys/domain.h> 74 #include <sys/file.h> /* for struct knote */ 75 #include <sys/kernel.h> 76 #include <sys/event.h> 77 #include <sys/proc.h> 78 #include <sys/protosw.h> 79 #include <sys/socket.h> 80 #include <sys/socketvar.h> 81 #include <sys/socketops.h> 82 #include <sys/resourcevar.h> 83 #include <sys/signalvar.h> 84 #include <sys/sysctl.h> 85 #include <sys/uio.h> 86 #include <sys/jail.h> 87 #include <vm/vm_zone.h> 88 #include <vm/pmap.h> 89 #include <net/netmsg2.h> 90 #include <net/netisr2.h> 91 92 #include <sys/thread2.h> 93 #include <sys/socketvar2.h> 94 #include <sys/spinlock2.h> 95 96 #include <machine/limits.h> 97 98 #ifdef INET 99 extern int tcp_sosend_agglim; 100 extern int tcp_sosend_async; 101 extern int tcp_sosend_jcluster; 102 extern int udp_sosend_async; 103 extern int udp_sosend_prepend; 104 105 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); 106 #endif /* INET */ 107 108 static void filt_sordetach(struct knote *kn); 109 static int filt_soread(struct knote *kn, long hint); 110 static void filt_sowdetach(struct knote *kn); 111 static int filt_sowrite(struct knote *kn, long hint); 112 static int filt_solisten(struct knote *kn, long hint); 113 114 static int soclose_sync(struct socket *so, int fflag); 115 static void soclose_fast(struct socket *so); 116 117 static struct filterops solisten_filtops = 118 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten }; 119 static struct filterops soread_filtops = 120 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 121 static struct filterops sowrite_filtops = 122 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite }; 123 static struct filterops soexcept_filtops = 124 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 125 126 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct"); 127 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 128 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); 129 130 131 static int somaxconn = SOMAXCONN; 132 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, 133 &somaxconn, 0, "Maximum pending socket connection queue size"); 134 135 static int use_soclose_fast = 1; 136 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW, 137 &use_soclose_fast, 0, "Fast socket close"); 138 139 int use_soaccept_pred_fast = 1; 140 SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW, 141 &use_soaccept_pred_fast, 0, "Fast socket accept predication"); 142 143 int use_sendfile_async = 1; 144 SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW, 145 &use_sendfile_async, 0, "sendfile uses asynchronized pru_send"); 146 147 int use_soconnect_async = 1; 148 SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW, 149 &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect"); 150 151 static int use_socreate_fast = 1; 152 SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW, 153 &use_socreate_fast, 0, "Fast socket creation"); 154 155 static int soavailconn = 32; 156 SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW, 157 &soavailconn, 0, "Maximum available socket connection queue size"); 158 159 /* 160 * Socket operation routines. 
161 * These routines are called by the routines in 162 * sys_socket.c or from a system process, and 163 * implement the semantics of socket operations by 164 * switching out to the protocol specific routines. 165 */ 166 167 /* 168 * Get a socket structure, and initialize it. 169 * Note that it would probably be better to allocate socket 170 * and PCB at the same time, but I'm not convinced that all 171 * the protocols can be easily modified to do this. 172 */ 173 struct socket * 174 soalloc(int waitok, struct protosw *pr) 175 { 176 struct socket *so; 177 unsigned waitmask; 178 179 waitmask = waitok ? M_WAITOK : M_NOWAIT; 180 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 181 if (so) { 182 /* XXX race condition for reentrant kernel */ 183 so->so_proto = pr; 184 TAILQ_INIT(&so->so_aiojobq); 185 TAILQ_INIT(&so->so_rcv.ssb_mlist); 186 TAILQ_INIT(&so->so_snd.ssb_mlist); 187 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 188 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 189 spin_init(&so->so_rcvd_spin, "soalloc"); 190 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 191 MSGF_DROPABLE | MSGF_PRIORITY, 192 so->so_proto->pr_usrreqs->pru_rcvd); 193 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 194 so->so_state = SS_NOFDREF; 195 so->so_refs = 1; 196 } 197 return so; 198 } 199 200 int 201 socreate(int dom, struct socket **aso, int type, 202 int proto, struct thread *td) 203 { 204 struct proc *p = td->td_proc; 205 struct protosw *prp; 206 struct socket *so; 207 struct pru_attach_info ai; 208 int error; 209 210 if (proto) 211 prp = pffindproto(dom, proto, type); 212 else 213 prp = pffindtype(dom, type); 214 215 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 216 return (EPROTONOSUPPORT); 217 218 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 219 prp->pr_domain->dom_family != PF_LOCAL && 220 prp->pr_domain->dom_family != PF_INET && 221 prp->pr_domain->dom_family != PF_INET6 && 222 prp->pr_domain->dom_family != PF_ROUTE) { 223 return (EPROTONOSUPPORT); 224 } 225 226 if (prp->pr_type != type) 227 return (EPROTOTYPE); 228 so = soalloc(p != NULL, prp); 229 if (so == NULL) 230 return (ENOBUFS); 231 232 /* 233 * Callers of socreate() presumably will connect up a descriptor 234 * and call soclose() if they cannot. This represents our so_refs 235 * (which should be 1) from soalloc(). 236 */ 237 soclrstate(so, SS_NOFDREF); 238 239 /* 240 * Set a default port for protocol processing. No action will occur 241 * on the socket on this port until an inpcb is attached to it and 242 * is able to match incoming packets, or until the socket becomes 243 * available to userland. 244 * 245 * We normally default the socket to the protocol thread on cpu 0, 246 * if protocol does not provide its own method to initialize the 247 * default port. 248 * 249 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 250 * thread and all pr_*()/pru_*() calls are executed synchronously. 251 */ 252 if (prp->pr_flags & PR_SYNC_PORT) 253 so->so_port = &netisr_sync_port; 254 else if (prp->pr_initport != NULL) 255 so->so_port = prp->pr_initport(); 256 else 257 so->so_port = netisr_cpuport(0); 258 259 TAILQ_INIT(&so->so_incomp); 260 TAILQ_INIT(&so->so_comp); 261 so->so_type = type; 262 so->so_cred = crhold(p->p_ucred); 263 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 264 ai.p_ucred = p->p_ucred; 265 ai.fd_rdir = p->p_fd->fd_rdir; 266 267 /* 268 * Auto-sizing of socket buffers is managed by the protocols and 269 * the appropriate flags must be set in the pru_attach function. 
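 *
 * When the protocol supplies a pru_preattach hook and the
 * kern.ipc.socreate_fast sysctl is enabled, the attach is issued via
 * so_pru_attach_fast(); otherwise the normal synchronous
 * so_pru_attach() path is used.  On failure SS_NOFDREF is restored
 * and the soalloc() reference is dropped via sofree().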
270 */ 271 if (use_socreate_fast && prp->pr_usrreqs->pru_preattach) 272 error = so_pru_attach_fast(so, proto, &ai); 273 else 274 error = so_pru_attach(so, proto, &ai); 275 if (error) { 276 sosetstate(so, SS_NOFDREF); 277 sofree(so); /* from soalloc */ 278 return error; 279 } 280 281 /* 282 * NOTE: Returns referenced socket. 283 */ 284 *aso = so; 285 return (0); 286 } 287 288 int 289 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 290 { 291 int error; 292 293 error = so_pru_bind(so, nam, td); 294 return (error); 295 } 296 297 static void 298 sodealloc(struct socket *so) 299 { 300 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0); 301 302 #ifdef INVARIANTS 303 if (so->so_options & SO_ACCEPTCONN) { 304 KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty")); 305 KASSERT(TAILQ_EMPTY(&so->so_incomp), 306 ("so_incomp is not empty")); 307 } 308 #endif 309 310 if (so->so_rcv.ssb_hiwat) 311 (void)chgsbsize(so->so_cred->cr_uidinfo, 312 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 313 if (so->so_snd.ssb_hiwat) 314 (void)chgsbsize(so->so_cred->cr_uidinfo, 315 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 316 #ifdef INET 317 /* remove accept filter if present */ 318 if (so->so_accf != NULL) 319 do_setopt_accept_filter(so, NULL); 320 #endif /* INET */ 321 crfree(so->so_cred); 322 if (so->so_faddr != NULL) 323 kfree(so->so_faddr, M_SONAME); 324 kfree(so, M_SOCKET); 325 } 326 327 int 328 solisten(struct socket *so, int backlog, struct thread *td) 329 { 330 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 331 return (EINVAL); 332 333 lwkt_gettoken(&so->so_rcv.ssb_token); 334 if (TAILQ_EMPTY(&so->so_comp)) 335 so->so_options |= SO_ACCEPTCONN; 336 lwkt_reltoken(&so->so_rcv.ssb_token); 337 if (backlog < 0 || backlog > somaxconn) 338 backlog = somaxconn; 339 so->so_qlimit = backlog; 340 return so_pru_listen(so, td); 341 } 342 343 static void 344 soqflush(struct socket *so) 345 { 346 lwkt_getpooltoken(so); 347 if (so->so_options & SO_ACCEPTCONN) { 348 struct socket *sp; 349 350 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 351 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == 352 SS_INCOMP); 353 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 354 so->so_incqlen--; 355 soclrstate(sp, SS_INCOMP); 356 soabort_async(sp, TRUE); 357 } 358 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 359 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == 360 SS_COMP); 361 TAILQ_REMOVE(&so->so_comp, sp, so_list); 362 so->so_qlen--; 363 soclrstate(sp, SS_COMP); 364 soabort_async(sp, TRUE); 365 } 366 } 367 lwkt_relpooltoken(so); 368 } 369 370 /* 371 * Destroy a disconnected socket. This routine is a NOP if entities 372 * still have a reference on the socket: 373 * 374 * so_pcb - The protocol stack still has a reference 375 * SS_NOFDREF - There is no longer a file pointer reference 376 */ 377 void 378 sofree(struct socket *so) 379 { 380 struct socket *head; 381 382 /* 383 * This is a bit hackish at the moment. We need to interlock 384 * any accept queue we are on before we potentially lose the 385 * last reference to avoid races against a re-reference from 386 * someone operating on the queue. 387 */ 388 while ((head = so->so_head) != NULL) { 389 lwkt_getpooltoken(head); 390 if (so->so_head == head) 391 break; 392 lwkt_relpooltoken(head); 393 } 394 395 /* 396 * Arbitrage the last free. 
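 *
 * The reference count is dropped atomically; only the caller that
 * takes so_refs from 1 to 0 proceeds with the teardown below.  Any
 * concurrent sofree() simply releases the accept-queue token (if one
 * was acquired above) and returns.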
397 */ 398 KKASSERT(so->so_refs > 0); 399 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 400 if (head) 401 lwkt_relpooltoken(head); 402 return; 403 } 404 405 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 406 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 407 408 if (head != NULL) { 409 /* 410 * We're done, remove ourselves from the accept queue we are 411 * on, if we are on one. 412 */ 413 if (so->so_state & SS_INCOMP) { 414 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 415 SS_INCOMP); 416 TAILQ_REMOVE(&head->so_incomp, so, so_list); 417 head->so_incqlen--; 418 } else if (so->so_state & SS_COMP) { 419 /* 420 * We must not decommission a socket that's 421 * on the accept(2) queue. If we do, then 422 * accept(2) may hang after select(2) indicated 423 * that the listening socket was ready. 424 */ 425 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 426 SS_COMP); 427 lwkt_relpooltoken(head); 428 return; 429 } else { 430 panic("sofree: not queued"); 431 } 432 soclrstate(so, SS_INCOMP); 433 so->so_head = NULL; 434 lwkt_relpooltoken(head); 435 } else { 436 /* Flush accept queues, if we are accepting. */ 437 soqflush(so); 438 } 439 ssb_release(&so->so_snd, so); 440 sorflush(so); 441 sodealloc(so); 442 } 443 444 /* 445 * Close a socket on last file table reference removal. 446 * Initiate disconnect if connected. 447 * Free socket when disconnect complete. 448 */ 449 int 450 soclose(struct socket *so, int fflag) 451 { 452 int error; 453 454 funsetown(&so->so_sigio); 455 sosetstate(so, SS_ISCLOSING); 456 if (!use_soclose_fast || 457 (so->so_proto->pr_flags & PR_SYNC_PORT) || 458 ((so->so_state & SS_ISCONNECTED) && 459 (so->so_options & SO_LINGER) && 460 so->so_linger != 0)) { 461 error = soclose_sync(so, fflag); 462 } else { 463 soclose_fast(so); 464 error = 0; 465 } 466 return error; 467 } 468 469 void 470 sodiscard(struct socket *so) 471 { 472 if (so->so_state & SS_NOFDREF) 473 panic("soclose: NOFDREF"); 474 sosetstate(so, SS_NOFDREF); /* take ref */ 475 } 476 477 /* 478 * Append the completed queue of head to head_inh (inherting listen socket). 479 */ 480 void 481 soinherit(struct socket *head, struct socket *head_inh) 482 { 483 boolean_t do_wakeup = FALSE; 484 485 KASSERT(head->so_options & SO_ACCEPTCONN, 486 ("head does not accept connection")); 487 KASSERT(head_inh->so_options & SO_ACCEPTCONN, 488 ("head_inh does not accept connection")); 489 490 lwkt_getpooltoken(head); 491 lwkt_getpooltoken(head_inh); 492 493 if (head->so_qlen > 0) 494 do_wakeup = TRUE; 495 496 while (!TAILQ_EMPTY(&head->so_comp)) { 497 struct ucred *old_cr; 498 struct socket *sp; 499 500 sp = TAILQ_FIRST(&head->so_comp); 501 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP); 502 503 /* 504 * Remove this socket from the current listen socket 505 * completed queue. 506 */ 507 TAILQ_REMOVE(&head->so_comp, sp, so_list); 508 head->so_qlen--; 509 510 /* Save the old ucred for later free. */ 511 old_cr = sp->so_cred; 512 513 /* 514 * Install this socket to the inheriting listen socket 515 * completed queue. 516 */ 517 sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */ 518 sp->so_head = head_inh; 519 520 TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list); 521 head_inh->so_qlen++; 522 523 /* 524 * NOTE: 525 * crfree() may block and release the tokens temporarily. 526 * However, we are fine here, since the transition is done. 
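 * The moved socket is already linked on head_inh's completed queue
 * with its new ucred and so_head installed, so a temporary token
 * release here cannot expose a half-migrated entry.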
527 */ 528 crfree(old_cr); 529 } 530 531 lwkt_relpooltoken(head_inh); 532 lwkt_relpooltoken(head); 533 534 if (do_wakeup) { 535 /* 536 * "New" connections have arrived 537 */ 538 sorwakeup(head_inh); 539 wakeup(&head_inh->so_timeo); 540 } 541 } 542 543 static int 544 soclose_sync(struct socket *so, int fflag) 545 { 546 int error = 0; 547 548 if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0) 549 so_pru_sync(so); /* unpend async prus */ 550 551 if (so->so_pcb == NULL) 552 goto discard; 553 554 if (so->so_state & SS_ISCONNECTED) { 555 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 556 error = sodisconnect(so); 557 if (error) 558 goto drop; 559 } 560 if (so->so_options & SO_LINGER) { 561 if ((so->so_state & SS_ISDISCONNECTING) && 562 (fflag & FNONBLOCK)) 563 goto drop; 564 while (so->so_state & SS_ISCONNECTED) { 565 error = tsleep(&so->so_timeo, PCATCH, 566 "soclos", so->so_linger * hz); 567 if (error) 568 break; 569 } 570 } 571 } 572 drop: 573 if (so->so_pcb) { 574 int error2; 575 576 error2 = so_pru_detach(so); 577 if (error2 == EJUSTRETURN) { 578 /* 579 * Protocol will call sodiscard() 580 * and sofree() for us. 581 */ 582 return error; 583 } 584 if (error == 0) 585 error = error2; 586 } 587 discard: 588 sodiscard(so); 589 sofree(so); /* dispose of ref */ 590 591 return (error); 592 } 593 594 static void 595 soclose_fast_handler(netmsg_t msg) 596 { 597 struct socket *so = msg->base.nm_so; 598 599 if (so->so_pcb == NULL) 600 goto discard; 601 602 if ((so->so_state & SS_ISCONNECTED) && 603 (so->so_state & SS_ISDISCONNECTING) == 0) 604 so_pru_disconnect_direct(so); 605 606 if (so->so_pcb) { 607 int error; 608 609 error = so_pru_detach_direct(so); 610 if (error == EJUSTRETURN) { 611 /* 612 * Protocol will call sodiscard() 613 * and sofree() for us. 614 */ 615 return; 616 } 617 } 618 discard: 619 sodiscard(so); 620 sofree(so); 621 } 622 623 static void 624 soclose_fast(struct socket *so) 625 { 626 struct netmsg_base *base = &so->so_clomsg; 627 628 netmsg_init(base, so, &netisr_apanic_rport, 0, 629 soclose_fast_handler); 630 if (so->so_port == netisr_curport()) 631 lwkt_sendmsg_oncpu(so->so_port, &base->lmsg); 632 else 633 lwkt_sendmsg(so->so_port, &base->lmsg); 634 } 635 636 /* 637 * Abort and destroy a socket. Only one abort can be in progress 638 * at any given moment. 639 */ 640 void 641 soabort_async(struct socket *so, boolean_t clr_head) 642 { 643 /* 644 * Keep a reference before clearing the so_head 645 * to avoid racing socket close in netisr. 646 */ 647 soreference(so); 648 if (clr_head) 649 so->so_head = NULL; 650 so_pru_abort_async(so); 651 } 652 653 void 654 soabort_direct(struct socket *so) 655 { 656 soreference(so); 657 so_pru_abort_direct(so); 658 } 659 660 /* 661 * so is passed in ref'd, which becomes owned by 662 * the cleared SS_NOFDREF flag. 663 */ 664 void 665 soaccept_generic(struct socket *so) 666 { 667 if ((so->so_state & SS_NOFDREF) == 0) 668 panic("soaccept: !NOFDREF"); 669 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 670 } 671 672 int 673 soaccept(struct socket *so, struct sockaddr **nam) 674 { 675 int error; 676 677 soaccept_generic(so); 678 error = so_pru_accept(so, nam); 679 return (error); 680 } 681 682 int 683 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td, 684 boolean_t sync) 685 { 686 int error; 687 688 if (so->so_options & SO_ACCEPTCONN) 689 return (EOPNOTSUPP); 690 /* 691 * If protocol is connection-based, can only connect once. 692 * Otherwise, if connected, try to disconnect first. 
693 * This allows user to disconnect by connecting to, e.g., 694 * a null address. 695 */ 696 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 697 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 698 (error = sodisconnect(so)))) { 699 error = EISCONN; 700 } else { 701 /* 702 * Prevent accumulated error from previous connection 703 * from biting us. 704 */ 705 so->so_error = 0; 706 if (!sync && so->so_proto->pr_usrreqs->pru_preconnect) 707 error = so_pru_connect_async(so, nam, td); 708 else 709 error = so_pru_connect(so, nam, td); 710 } 711 return (error); 712 } 713 714 int 715 soconnect2(struct socket *so1, struct socket *so2) 716 { 717 int error; 718 719 error = so_pru_connect2(so1, so2); 720 return (error); 721 } 722 723 int 724 sodisconnect(struct socket *so) 725 { 726 int error; 727 728 if ((so->so_state & SS_ISCONNECTED) == 0) { 729 error = ENOTCONN; 730 goto bad; 731 } 732 if (so->so_state & SS_ISDISCONNECTING) { 733 error = EALREADY; 734 goto bad; 735 } 736 error = so_pru_disconnect(so); 737 bad: 738 return (error); 739 } 740 741 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 742 /* 743 * Send on a socket. 744 * If send must go all at once and message is larger than 745 * send buffering, then hard error. 746 * Lock against other senders. 747 * If must go all at once and not enough room now, then 748 * inform user that this would block and do nothing. 749 * Otherwise, if nonblocking, send as much as possible. 750 * The data to be sent is described by "uio" if nonzero, 751 * otherwise by the mbuf chain "top" (which must be null 752 * if uio is not). Data provided in mbuf chain must be small 753 * enough to send all at once. 754 * 755 * Returns nonzero on error, timeout or signal; callers 756 * must check for short counts if EINTR/ERESTART are returned. 757 * Data and control buffers are freed on return. 758 */ 759 int 760 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 761 struct mbuf *top, struct mbuf *control, int flags, 762 struct thread *td) 763 { 764 struct mbuf **mp; 765 struct mbuf *m; 766 size_t resid; 767 int space, len; 768 int clen = 0, error, dontroute, mlen; 769 int atomic = sosendallatonce(so) || top; 770 int pru_flags; 771 772 if (uio) { 773 resid = uio->uio_resid; 774 } else { 775 resid = (size_t)top->m_pkthdr.len; 776 #ifdef INVARIANTS 777 len = 0; 778 for (m = top; m; m = m->m_next) 779 len += m->m_len; 780 KKASSERT(top->m_pkthdr.len == len); 781 #endif 782 } 783 784 /* 785 * WARNING! resid is unsigned, space and len are signed. space 786 * can wind up negative if the sockbuf is overcommitted. 787 * 788 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 789 * type sockets since that's an error. 
790 */ 791 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 792 error = EINVAL; 793 goto out; 794 } 795 796 dontroute = 797 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 798 (so->so_proto->pr_flags & PR_ATOMIC); 799 if (td->td_lwp != NULL) 800 td->td_lwp->lwp_ru.ru_msgsnd++; 801 if (control) 802 clen = control->m_len; 803 #define gotoerr(errcode) { error = errcode; goto release; } 804 805 restart: 806 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 807 if (error) 808 goto out; 809 810 do { 811 if (so->so_state & SS_CANTSENDMORE) 812 gotoerr(EPIPE); 813 if (so->so_error) { 814 error = so->so_error; 815 so->so_error = 0; 816 goto release; 817 } 818 if ((so->so_state & SS_ISCONNECTED) == 0) { 819 /* 820 * `sendto' and `sendmsg' are allowed on a connection- 821 * based socket if it supports implied connect. 822 * Return ENOTCONN if not connected and no address is 823 * supplied. 824 */ 825 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 826 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 827 if ((so->so_state & SS_ISCONFIRMING) == 0 && 828 !(resid == 0 && clen != 0)) 829 gotoerr(ENOTCONN); 830 } else if (addr == NULL) 831 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 832 ENOTCONN : EDESTADDRREQ); 833 } 834 if ((atomic && resid > so->so_snd.ssb_hiwat) || 835 clen > so->so_snd.ssb_hiwat) { 836 gotoerr(EMSGSIZE); 837 } 838 space = ssb_space(&so->so_snd); 839 if (flags & MSG_OOB) 840 space += 1024; 841 if ((space < 0 || (size_t)space < resid + clen) && uio && 842 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 843 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 844 gotoerr(EWOULDBLOCK); 845 ssb_unlock(&so->so_snd); 846 error = ssb_wait(&so->so_snd); 847 if (error) 848 goto out; 849 goto restart; 850 } 851 mp = &top; 852 space -= clen; 853 do { 854 if (uio == NULL) { 855 /* 856 * Data is prepackaged in "top". 857 */ 858 resid = 0; 859 if (flags & MSG_EOR) 860 top->m_flags |= M_EOR; 861 } else do { 862 if (resid > INT_MAX) 863 resid = INT_MAX; 864 m = m_getl((int)resid, M_WAITOK, MT_DATA, 865 top == NULL ? M_PKTHDR : 0, &mlen); 866 if (top == NULL) { 867 m->m_pkthdr.len = 0; 868 m->m_pkthdr.rcvif = NULL; 869 } 870 len = imin((int)szmin(mlen, resid), space); 871 if (resid < MINCLSIZE) { 872 /* 873 * For datagram protocols, leave room 874 * for protocol headers in first mbuf. 875 */ 876 if (atomic && top == NULL && len < mlen) 877 MH_ALIGN(m, len); 878 } 879 space -= len; 880 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 881 resid = uio->uio_resid; 882 m->m_len = len; 883 *mp = m; 884 top->m_pkthdr.len += len; 885 if (error) 886 goto release; 887 mp = &m->m_next; 888 if (resid == 0) { 889 if (flags & MSG_EOR) 890 top->m_flags |= M_EOR; 891 break; 892 } 893 } while (space > 0 && atomic); 894 if (dontroute) 895 so->so_options |= SO_DONTROUTE; 896 if (flags & MSG_OOB) { 897 pru_flags = PRUS_OOB; 898 } else if ((flags & MSG_EOF) && 899 (so->so_proto->pr_flags & PR_IMPLOPCL) && 900 (resid == 0)) { 901 /* 902 * If the user set MSG_EOF, the protocol 903 * understands this flag and nothing is left to 904 * send, then use PRU_SEND_EOF instead of PRU_SEND. 905 */ 906 pru_flags = PRUS_EOF; 907 } else if (resid > 0 && space > 0) { 908 /* If there is more to send, set PRUS_MORETOCOME */ 909 pru_flags = PRUS_MORETOCOME; 910 } else { 911 pru_flags = 0; 912 } 913 /* 914 * XXX all the SS_CANTSENDMORE checks previously 915 * done could be out of date. We could have received 916 * a reset packet in an interrupt or maybe we slept 917 * while doing page faults in uiomove() etc.
We could 918 * probably recheck again inside the splnet() protection 919 * here, but there are probably other places that this 920 * also happens. We must rethink this. 921 */ 922 error = so_pru_send(so, pru_flags, top, addr, control, td); 923 if (dontroute) 924 so->so_options &= ~SO_DONTROUTE; 925 clen = 0; 926 control = NULL; 927 top = NULL; 928 mp = &top; 929 if (error) 930 goto release; 931 } while (resid && space > 0); 932 } while (resid); 933 934 release: 935 ssb_unlock(&so->so_snd); 936 out: 937 if (top) 938 m_freem(top); 939 if (control) 940 m_freem(control); 941 return (error); 942 } 943 944 #ifdef INET 945 /* 946 * A specialization of sosend() for UDP based on protocol-specific knowledge: 947 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 948 * sosendallatonce() returns true, 949 * the "atomic" variable is true, 950 * and sosendudp() blocks until space is available for the entire send. 951 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 952 * PR_IMPLOPCL flags set. 953 * UDP has no out-of-band data. 954 * UDP has no control data. 955 * UDP does not support MSG_EOR. 956 */ 957 int 958 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 959 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 960 { 961 size_t resid; 962 int error, pru_flags = 0; 963 int space; 964 965 if (td->td_lwp != NULL) 966 td->td_lwp->lwp_ru.ru_msgsnd++; 967 if (control) 968 m_freem(control); 969 970 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 971 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len; 972 973 restart: 974 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 975 if (error) 976 goto out; 977 978 if (so->so_state & SS_CANTSENDMORE) 979 gotoerr(EPIPE); 980 if (so->so_error) { 981 error = so->so_error; 982 so->so_error = 0; 983 goto release; 984 } 985 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 986 gotoerr(EDESTADDRREQ); 987 if (resid > so->so_snd.ssb_hiwat) 988 gotoerr(EMSGSIZE); 989 space = ssb_space(&so->so_snd); 990 if (uio && (space < 0 || (size_t)space < resid)) { 991 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 992 gotoerr(EWOULDBLOCK); 993 ssb_unlock(&so->so_snd); 994 error = ssb_wait(&so->so_snd); 995 if (error) 996 goto out; 997 goto restart; 998 } 999 1000 if (uio) { 1001 int hdrlen = max_hdr; 1002 1003 /* 1004 * We try to optimize out the additional mbuf 1005 * allocations in M_PREPEND() on output path, e.g. 1006 * - udp_output(), when it tries to prepend protocol 1007 * headers. 1008 * - Link layer output function, when it tries to 1009 * prepend link layer header. 1010 * 1011 * This probably will not benefit any data that will 1012 * be fragmented, so this optimization is only performed 1013 * when the size of data and max size of protocol+link 1014 * headers fit into one mbuf cluster.
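 * In that case a single cluster is allocated below, m_data is
 * advanced by max_hdr bytes to reserve leading space for the later
 * M_PREPEND() calls, and the payload is copied in with uiomove().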
1015 */ 1016 if (uio->uio_resid > MCLBYTES - hdrlen || 1017 !udp_sosend_prepend) { 1018 top = m_uiomove(uio); 1019 if (top == NULL) 1020 goto release; 1021 } else { 1022 int nsize; 1023 1024 top = m_getl(uio->uio_resid + hdrlen, M_WAITOK, 1025 MT_DATA, M_PKTHDR, &nsize); 1026 KASSERT(nsize >= uio->uio_resid + hdrlen, 1027 ("sosendudp invalid nsize %d, " 1028 "resid %zu, hdrlen %d", 1029 nsize, uio->uio_resid, hdrlen)); 1030 1031 top->m_len = uio->uio_resid; 1032 top->m_pkthdr.len = uio->uio_resid; 1033 top->m_data += hdrlen; 1034 1035 error = uiomove(mtod(top, caddr_t), top->m_len, uio); 1036 if (error) 1037 goto out; 1038 } 1039 } 1040 1041 if (flags & MSG_DONTROUTE) 1042 pru_flags |= PRUS_DONTROUTE; 1043 1044 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 1045 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 1046 error = 0; 1047 } else { 1048 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 1049 } 1050 top = NULL; /* sent or freed in lower layer */ 1051 1052 release: 1053 ssb_unlock(&so->so_snd); 1054 out: 1055 if (top) 1056 m_freem(top); 1057 return (error); 1058 } 1059 1060 int 1061 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 1062 struct mbuf *top, struct mbuf *control, int flags, 1063 struct thread *td) 1064 { 1065 struct mbuf **mp; 1066 struct mbuf *m; 1067 size_t resid; 1068 int space, len; 1069 int error, mlen; 1070 int allatonce; 1071 int pru_flags; 1072 1073 if (uio) { 1074 KKASSERT(top == NULL); 1075 allatonce = 0; 1076 resid = uio->uio_resid; 1077 } else { 1078 allatonce = 1; 1079 resid = (size_t)top->m_pkthdr.len; 1080 #ifdef INVARIANTS 1081 len = 0; 1082 for (m = top; m; m = m->m_next) 1083 len += m->m_len; 1084 KKASSERT(top->m_pkthdr.len == len); 1085 #endif 1086 } 1087 1088 /* 1089 * WARNING! resid is unsigned, space and len are signed. space 1090 * can wind up negative if the sockbuf is overcommitted. 1091 * 1092 * Also check to make sure that MSG_EOR isn't used on TCP 1093 */ 1094 if (flags & MSG_EOR) { 1095 error = EINVAL; 1096 goto out; 1097 } 1098 1099 if (control) { 1100 /* TCP doesn't do control messages (rights, creds, etc) */ 1101 if (control->m_len) { 1102 error = EINVAL; 1103 goto out; 1104 } 1105 m_freem(control); /* empty control, just free it */ 1106 control = NULL; 1107 } 1108 1109 if (td->td_lwp != NULL) 1110 td->td_lwp->lwp_ru.ru_msgsnd++; 1111 1112 #define gotoerr(errcode) { error = errcode; goto release; } 1113 1114 restart: 1115 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1116 if (error) 1117 goto out; 1118 1119 do { 1120 if (so->so_state & SS_CANTSENDMORE) 1121 gotoerr(EPIPE); 1122 if (so->so_error) { 1123 error = so->so_error; 1124 so->so_error = 0; 1125 goto release; 1126 } 1127 if ((so->so_state & SS_ISCONNECTED) == 0 && 1128 (so->so_state & SS_ISCONFIRMING) == 0) 1129 gotoerr(ENOTCONN); 1130 if (allatonce && resid > so->so_snd.ssb_hiwat) 1131 gotoerr(EMSGSIZE); 1132 1133 space = ssb_space_prealloc(&so->so_snd); 1134 if (flags & MSG_OOB) 1135 space += 1024; 1136 if ((space < 0 || (size_t)space < resid) && !allatonce && 1137 space < so->so_snd.ssb_lowat) { 1138 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1139 gotoerr(EWOULDBLOCK); 1140 ssb_unlock(&so->so_snd); 1141 error = ssb_wait(&so->so_snd); 1142 if (error) 1143 goto out; 1144 goto restart; 1145 } 1146 mp = &top; 1147 do { 1148 int cnt = 0, async = 0; 1149 1150 if (uio == NULL) { 1151 /* 1152 * Data is prepackaged in "top".
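 * The caller already built the mbuf chain, so there is nothing to
 * copy in from userland; it is simply handed to the protocol.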
1153 */ 1154 resid = 0; 1155 } else do { 1156 if (resid > INT_MAX) 1157 resid = INT_MAX; 1158 if (tcp_sosend_jcluster) { 1159 m = m_getlj((int)resid, M_WAITOK, MT_DATA, 1160 top == NULL ? M_PKTHDR : 0, &mlen); 1161 } else { 1162 m = m_getl((int)resid, M_WAITOK, MT_DATA, 1163 top == NULL ? M_PKTHDR : 0, &mlen); 1164 } 1165 if (top == NULL) { 1166 m->m_pkthdr.len = 0; 1167 m->m_pkthdr.rcvif = NULL; 1168 } 1169 len = imin((int)szmin(mlen, resid), space); 1170 space -= len; 1171 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1172 resid = uio->uio_resid; 1173 m->m_len = len; 1174 *mp = m; 1175 top->m_pkthdr.len += len; 1176 if (error) 1177 goto release; 1178 mp = &m->m_next; 1179 if (resid == 0) 1180 break; 1181 ++cnt; 1182 } while (space > 0 && cnt < tcp_sosend_agglim); 1183 1184 if (tcp_sosend_async) 1185 async = 1; 1186 1187 if (flags & MSG_OOB) { 1188 pru_flags = PRUS_OOB; 1189 async = 0; 1190 } else if ((flags & MSG_EOF) && resid == 0) { 1191 pru_flags = PRUS_EOF; 1192 } else if (resid > 0 && space > 0) { 1193 /* If there is more to send, set PRUS_MORETOCOME */ 1194 pru_flags = PRUS_MORETOCOME; 1195 async = 1; 1196 } else { 1197 pru_flags = 0; 1198 } 1199 1200 if (flags & MSG_SYNC) 1201 async = 0; 1202 1203 /* 1204 * XXX all the SS_CANTSENDMORE checks previously 1205 * done could be out of date. We could have received 1206 * a reset packet in an interrupt or maybe we slept 1207 * while doing page faults in uiomove() etc. We could 1208 * probably recheck again inside the splnet() protection 1209 * here, but there are probably other places that this 1210 * also happens. We must rethink this. 1211 */ 1212 for (m = top; m; m = m->m_next) 1213 ssb_preallocstream(&so->so_snd, m); 1214 if (!async) { 1215 error = so_pru_send(so, pru_flags, top, 1216 NULL, NULL, td); 1217 } else { 1218 so_pru_send_async(so, pru_flags, top, 1219 NULL, NULL, td); 1220 error = 0; 1221 } 1222 1223 top = NULL; 1224 mp = &top; 1225 if (error) 1226 goto release; 1227 } while (resid && space > 0); 1228 } while (resid); 1229 1230 release: 1231 ssb_unlock(&so->so_snd); 1232 out: 1233 if (top) 1234 m_freem(top); 1235 if (control) 1236 m_freem(control); 1237 return (error); 1238 } 1239 #endif 1240 1241 /* 1242 * Implement receive operations on a socket. 1243 * 1244 * We depend on the way that records are added to the signalsockbuf 1245 * by sbappend*. In particular, each record (mbufs linked through m_next) 1246 * must begin with an address if the protocol so specifies, 1247 * followed by an optional mbuf or mbufs containing ancillary data, 1248 * and then zero or more mbufs of data. 1249 * 1250 * Although the signalsockbuf is locked, new data may still be appended. 1251 * A token inside the ssb_lock deals with MP issues and still allows 1252 * the network to access the socket if we block in a uio. 1253 * 1254 * The caller may receive the data as a single mbuf chain by supplying 1255 * a sockbuf (the 'sio' argument) for use in returning the chain. The uio is then used 1256 * only for the count in uio_resid.
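 *
 * When sio is non-NULL the record's mbufs are unlinked from so_rcv
 * and appended to sio with sbappend() instead of being copied out
 * through uiomove(); if no uio is supplied at all, sio->sb_climit
 * bounds how much is taken.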
1257 */ 1258 int 1259 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1260 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1261 { 1262 struct mbuf *m, *n; 1263 struct mbuf *free_chain = NULL; 1264 int flags, len, error, offset; 1265 struct protosw *pr = so->so_proto; 1266 int moff, type = 0; 1267 size_t resid, orig_resid; 1268 boolean_t free_rights = FALSE; 1269 1270 if (uio) 1271 resid = uio->uio_resid; 1272 else 1273 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1274 orig_resid = resid; 1275 1276 if (psa) 1277 *psa = NULL; 1278 if (controlp) 1279 *controlp = NULL; 1280 if (flagsp) 1281 flags = *flagsp &~ MSG_EOR; 1282 else 1283 flags = 0; 1284 if (flags & MSG_OOB) { 1285 m = m_get(M_WAITOK, MT_DATA); 1286 if (m == NULL) 1287 return (ENOBUFS); 1288 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1289 if (error) 1290 goto bad; 1291 if (sio) { 1292 do { 1293 sbappend(sio, m); 1294 KKASSERT(resid >= (size_t)m->m_len); 1295 resid -= (size_t)m->m_len; 1296 } while (resid > 0 && m); 1297 } else { 1298 do { 1299 uio->uio_resid = resid; 1300 error = uiomove(mtod(m, caddr_t), 1301 (int)szmin(resid, m->m_len), 1302 uio); 1303 resid = uio->uio_resid; 1304 m = m_free(m); 1305 } while (uio->uio_resid && error == 0 && m); 1306 } 1307 bad: 1308 if (m) 1309 m_freem(m); 1310 return (error); 1311 } 1312 if ((so->so_state & SS_ISCONFIRMING) && resid) 1313 so_pru_rcvd(so, 0); 1314 1315 /* 1316 * The token interlocks against the protocol thread while 1317 * ssb_lock is a blocking lock against other userland entities. 1318 */ 1319 lwkt_gettoken(&so->so_rcv.ssb_token); 1320 restart: 1321 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1322 if (error) 1323 goto done; 1324 1325 m = so->so_rcv.ssb_mb; 1326 /* 1327 * If we have less data than requested, block awaiting more 1328 * (subject to any timeout) if: 1329 * 1. the current count is less than the low water mark, or 1330 * 2. MSG_WAITALL is set, and it is possible to do the entire 1331 * receive operation at once if we block (resid <= hiwat). 1332 * 3. MSG_DONTWAIT is not set 1333 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1334 * we have to do the receive in sections, and thus risk returning 1335 * a short count if a timeout or signal occurs after we start. 
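 * A non-blocking request (MSG_DONTWAIT or MSG_FNONBLOCKING) never
 * waits here; it returns EWOULDBLOCK instead.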
1336 */ 1337 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1338 (size_t)so->so_rcv.ssb_cc < resid) && 1339 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1340 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1341 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1342 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1343 if (so->so_error) { 1344 if (m) 1345 goto dontblock; 1346 error = so->so_error; 1347 if ((flags & MSG_PEEK) == 0) 1348 so->so_error = 0; 1349 goto release; 1350 } 1351 if (so->so_state & SS_CANTRCVMORE) { 1352 if (m) 1353 goto dontblock; 1354 else 1355 goto release; 1356 } 1357 for (; m; m = m->m_next) { 1358 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1359 m = so->so_rcv.ssb_mb; 1360 goto dontblock; 1361 } 1362 } 1363 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1364 (pr->pr_flags & PR_CONNREQUIRED)) { 1365 error = ENOTCONN; 1366 goto release; 1367 } 1368 if (resid == 0) 1369 goto release; 1370 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1371 error = EWOULDBLOCK; 1372 goto release; 1373 } 1374 ssb_unlock(&so->so_rcv); 1375 error = ssb_wait(&so->so_rcv); 1376 if (error) 1377 goto done; 1378 goto restart; 1379 } 1380 dontblock: 1381 if (uio && uio->uio_td && uio->uio_td->td_proc) 1382 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1383 1384 /* 1385 * note: m should be == sb_mb here. Cache the next record while 1386 * cleaning up. Note that calling m_free*() will break out critical 1387 * section. 1388 */ 1389 KKASSERT(m == so->so_rcv.ssb_mb); 1390 1391 /* 1392 * Skip any address mbufs prepending the record. 1393 */ 1394 if (pr->pr_flags & PR_ADDR) { 1395 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1396 orig_resid = 0; 1397 if (psa) 1398 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1399 if (flags & MSG_PEEK) 1400 m = m->m_next; 1401 else 1402 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1403 } 1404 1405 /* 1406 * Skip any control mbufs prepending the record. 1407 */ 1408 while (m && m->m_type == MT_CONTROL && error == 0) { 1409 if (flags & MSG_PEEK) { 1410 if (controlp) 1411 *controlp = m_copy(m, 0, m->m_len); 1412 m = m->m_next; /* XXX race */ 1413 } else { 1414 const struct cmsghdr *cm = mtod(m, struct cmsghdr *); 1415 1416 if (controlp) { 1417 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1418 if (pr->pr_domain->dom_externalize && 1419 cm->cmsg_level == SOL_SOCKET && 1420 cm->cmsg_type == SCM_RIGHTS) { 1421 error = pr->pr_domain->dom_externalize 1422 (m, flags); 1423 } 1424 *controlp = m; 1425 m = n; 1426 } else { 1427 if (cm->cmsg_level == SOL_SOCKET && 1428 cm->cmsg_type == SCM_RIGHTS) 1429 free_rights = TRUE; 1430 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1431 } 1432 } 1433 if (controlp && *controlp) { 1434 orig_resid = 0; 1435 controlp = &(*controlp)->m_next; 1436 } 1437 } 1438 1439 /* 1440 * flag OOB data. 1441 */ 1442 if (m) { 1443 type = m->m_type; 1444 if (type == MT_OOBDATA) 1445 flags |= MSG_OOB; 1446 } 1447 1448 /* 1449 * Copy to the UIO or mbuf return chain (*mp). 1450 */ 1451 moff = 0; 1452 offset = 0; 1453 while (m && resid > 0 && error == 0) { 1454 if (m->m_type == MT_OOBDATA) { 1455 if (type != MT_OOBDATA) 1456 break; 1457 } else if (type == MT_OOBDATA) 1458 break; 1459 else 1460 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1461 ("receive 3")); 1462 soclrstate(so, SS_RCVATMARK); 1463 len = (resid > INT_MAX) ? 
INT_MAX : resid; 1464 if (so->so_oobmark && len > so->so_oobmark - offset) 1465 len = so->so_oobmark - offset; 1466 if (len > m->m_len - moff) 1467 len = m->m_len - moff; 1468 1469 /* 1470 * Copy out to the UIO or pass the mbufs back to the SIO. 1471 * The SIO is dealt with when we eat the mbuf, but deal 1472 * with the resid here either way. 1473 */ 1474 if (uio) { 1475 uio->uio_resid = resid; 1476 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1477 resid = uio->uio_resid; 1478 if (error) 1479 goto release; 1480 } else { 1481 resid -= (size_t)len; 1482 } 1483 1484 /* 1485 * Eat the entire mbuf or just a piece of it 1486 */ 1487 if (len == m->m_len - moff) { 1488 if (m->m_flags & M_EOR) 1489 flags |= MSG_EOR; 1490 if (flags & MSG_PEEK) { 1491 m = m->m_next; 1492 moff = 0; 1493 } else { 1494 if (sio) { 1495 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1496 sbappend(sio, m); 1497 m = n; 1498 } else { 1499 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1500 } 1501 } 1502 } else { 1503 if (flags & MSG_PEEK) { 1504 moff += len; 1505 } else { 1506 if (sio) { 1507 n = m_copym(m, 0, len, M_WAITOK); 1508 if (n) 1509 sbappend(sio, n); 1510 } 1511 m->m_data += len; 1512 m->m_len -= len; 1513 so->so_rcv.ssb_cc -= len; 1514 } 1515 } 1516 if (so->so_oobmark) { 1517 if ((flags & MSG_PEEK) == 0) { 1518 so->so_oobmark -= len; 1519 if (so->so_oobmark == 0) { 1520 sosetstate(so, SS_RCVATMARK); 1521 break; 1522 } 1523 } else { 1524 offset += len; 1525 if (offset == so->so_oobmark) 1526 break; 1527 } 1528 } 1529 if (flags & MSG_EOR) 1530 break; 1531 /* 1532 * If the MSG_WAITALL flag is set (for non-atomic socket), 1533 * we must not quit until resid == 0 or an error 1534 * termination. If a signal/timeout occurs, return 1535 * with a short count but without error. 1536 * Keep signalsockbuf locked against other readers. 1537 */ 1538 while ((flags & MSG_WAITALL) && m == NULL && 1539 resid > 0 && !sosendallatonce(so) && 1540 so->so_rcv.ssb_mb == NULL) { 1541 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1542 break; 1543 /* 1544 * The window might have closed to zero, make 1545 * sure we send an ack now that we've drained 1546 * the buffer or we might end up blocking until 1547 * the idle takes over (5 seconds). 1548 */ 1549 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1550 so_pru_rcvd(so, flags); 1551 error = ssb_wait(&so->so_rcv); 1552 if (error) { 1553 ssb_unlock(&so->so_rcv); 1554 error = 0; 1555 goto done; 1556 } 1557 m = so->so_rcv.ssb_mb; 1558 } 1559 } 1560 1561 /* 1562 * If an atomic read was requested but unread data still remains 1563 * in the record, set MSG_TRUNC. 1564 */ 1565 if (m && pr->pr_flags & PR_ATOMIC) 1566 flags |= MSG_TRUNC; 1567 1568 /* 1569 * Cleanup. If an atomic read was requested drop any unread data. 
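 * For protocols that ask for it (PR_WANTRCVD) the protocol is also
 * notified via so_pru_rcvd() that data has been taken, e.g. so that
 * a window which had closed to zero can be reopened.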
1570 */ 1571 if ((flags & MSG_PEEK) == 0) { 1572 if (m && (pr->pr_flags & PR_ATOMIC)) 1573 sbdroprecord(&so->so_rcv.sb); 1574 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1575 so_pru_rcvd(so, flags); 1576 } 1577 1578 if (orig_resid == resid && orig_resid && 1579 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1580 ssb_unlock(&so->so_rcv); 1581 goto restart; 1582 } 1583 1584 if (flagsp) 1585 *flagsp |= flags; 1586 release: 1587 ssb_unlock(&so->so_rcv); 1588 done: 1589 lwkt_reltoken(&so->so_rcv.ssb_token); 1590 if (free_chain) { 1591 if (free_rights && (pr->pr_flags & PR_RIGHTS) && 1592 pr->pr_domain->dom_dispose) 1593 pr->pr_domain->dom_dispose(free_chain); 1594 m_freem(free_chain); 1595 } 1596 return (error); 1597 } 1598 1599 int 1600 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio, 1601 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1602 { 1603 struct mbuf *m, *n; 1604 struct mbuf *free_chain = NULL; 1605 int flags, len, error, offset; 1606 struct protosw *pr = so->so_proto; 1607 int moff; 1608 int didoob; 1609 size_t resid, orig_resid, restmp; 1610 1611 if (uio) 1612 resid = uio->uio_resid; 1613 else 1614 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1615 orig_resid = resid; 1616 1617 if (psa) 1618 *psa = NULL; 1619 if (controlp) 1620 *controlp = NULL; 1621 if (flagsp) 1622 flags = *flagsp &~ MSG_EOR; 1623 else 1624 flags = 0; 1625 if (flags & MSG_OOB) { 1626 m = m_get(M_WAITOK, MT_DATA); 1627 if (m == NULL) 1628 return (ENOBUFS); 1629 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1630 if (error) 1631 goto bad; 1632 if (sio) { 1633 do { 1634 sbappend(sio, m); 1635 KKASSERT(resid >= (size_t)m->m_len); 1636 resid -= (size_t)m->m_len; 1637 } while (resid > 0 && m); 1638 } else { 1639 do { 1640 uio->uio_resid = resid; 1641 error = uiomove(mtod(m, caddr_t), 1642 (int)szmin(resid, m->m_len), 1643 uio); 1644 resid = uio->uio_resid; 1645 m = m_free(m); 1646 } while (uio->uio_resid && error == 0 && m); 1647 } 1648 bad: 1649 if (m) 1650 m_freem(m); 1651 return (error); 1652 } 1653 1654 /* 1655 * The token interlocks against the protocol thread while 1656 * ssb_lock is a blocking lock against other userland entities. 1657 * 1658 * Lock a limited number of mbufs (not all, so sbcompress() still 1659 * works well). The token is used as an interlock for sbwait() so 1660 * release it afterwords. 1661 */ 1662 restart: 1663 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1664 if (error) 1665 goto done; 1666 1667 lwkt_gettoken(&so->so_rcv.ssb_token); 1668 m = so->so_rcv.ssb_mb; 1669 1670 /* 1671 * If we have less data than requested, block awaiting more 1672 * (subject to any timeout) if: 1673 * 1. the current count is less than the low water mark, or 1674 * 2. MSG_WAITALL is set, and it is possible to do the entire 1675 * receive operation at once if we block (resid <= hiwat). 1676 * 3. MSG_DONTWAIT is not set 1677 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1678 * we have to do the receive in sections, and thus risk returning 1679 * a short count if a timeout or signal occurs after we start. 
1680 */ 1681 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1682 (size_t)so->so_rcv.ssb_cc < resid) && 1683 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1684 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1685 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1686 if (so->so_error) { 1687 if (m) 1688 goto dontblock; 1689 lwkt_reltoken(&so->so_rcv.ssb_token); 1690 error = so->so_error; 1691 if ((flags & MSG_PEEK) == 0) 1692 so->so_error = 0; 1693 goto release; 1694 } 1695 if (so->so_state & SS_CANTRCVMORE) { 1696 if (m) 1697 goto dontblock; 1698 lwkt_reltoken(&so->so_rcv.ssb_token); 1699 goto release; 1700 } 1701 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1702 (pr->pr_flags & PR_CONNREQUIRED)) { 1703 lwkt_reltoken(&so->so_rcv.ssb_token); 1704 error = ENOTCONN; 1705 goto release; 1706 } 1707 if (resid == 0) { 1708 lwkt_reltoken(&so->so_rcv.ssb_token); 1709 goto release; 1710 } 1711 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1712 lwkt_reltoken(&so->so_rcv.ssb_token); 1713 error = EWOULDBLOCK; 1714 goto release; 1715 } 1716 ssb_unlock(&so->so_rcv); 1717 error = ssb_wait(&so->so_rcv); 1718 lwkt_reltoken(&so->so_rcv.ssb_token); 1719 if (error) 1720 goto done; 1721 goto restart; 1722 } 1723 1724 /* 1725 * Token still held 1726 */ 1727 dontblock: 1728 n = m; 1729 restmp = 0; 1730 while (n && restmp < resid) { 1731 n->m_flags |= M_SOLOCKED; 1732 restmp += n->m_len; 1733 if (n->m_next == NULL) 1734 n = n->m_nextpkt; 1735 else 1736 n = n->m_next; 1737 } 1738 1739 /* 1740 * Release token for loop 1741 */ 1742 lwkt_reltoken(&so->so_rcv.ssb_token); 1743 if (uio && uio->uio_td && uio->uio_td->td_proc) 1744 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1745 1746 /* 1747 * note: m should be == sb_mb here. Cache the next record while 1748 * cleaning up. Note that calling m_free*() will break out critical 1749 * section. 1750 */ 1751 KKASSERT(m == so->so_rcv.ssb_mb); 1752 1753 /* 1754 * Copy to the UIO or mbuf return chain (*mp). 1755 * 1756 * NOTE: Token is not held for loop 1757 */ 1758 moff = 0; 1759 offset = 0; 1760 didoob = 0; 1761 1762 while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) { 1763 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1764 ("receive 3")); 1765 1766 soclrstate(so, SS_RCVATMARK); 1767 len = (resid > INT_MAX) ? INT_MAX : resid; 1768 if (so->so_oobmark && len > so->so_oobmark - offset) 1769 len = so->so_oobmark - offset; 1770 if (len > m->m_len - moff) 1771 len = m->m_len - moff; 1772 1773 /* 1774 * Copy out to the UIO or pass the mbufs back to the SIO. 1775 * The SIO is dealt with when we eat the mbuf, but deal 1776 * with the resid here either way. 1777 */ 1778 if (uio) { 1779 uio->uio_resid = resid; 1780 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1781 resid = uio->uio_resid; 1782 if (error) 1783 goto release; 1784 } else { 1785 resid -= (size_t)len; 1786 } 1787 1788 /* 1789 * Eat the entire mbuf or just a piece of it 1790 */ 1791 offset += len; 1792 if (len == m->m_len - moff) { 1793 m = m->m_next; 1794 moff = 0; 1795 } else { 1796 moff += len; 1797 } 1798 1799 /* 1800 * Check oobmark 1801 */ 1802 if (so->so_oobmark && offset == so->so_oobmark) { 1803 didoob = 1; 1804 break; 1805 } 1806 } 1807 1808 /* 1809 * Synchronize sockbuf with data we read. 1810 * 1811 * NOTE: (m) is junk on entry (it could be left over from the 1812 * previous loop). 
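 *
 * Whole mbufs consumed by the copy loop above are unlinked from
 * so_rcv (or handed to the sio chain); a partially consumed head
 * mbuf is trimmed by adjusting m_data, m_len and ssb_cc, and
 * so_oobmark is advanced the same way.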
1813 */ 1814 if ((flags & MSG_PEEK) == 0) { 1815 lwkt_gettoken(&so->so_rcv.ssb_token); 1816 m = so->so_rcv.ssb_mb; 1817 while (m && offset >= m->m_len) { 1818 if (so->so_oobmark) { 1819 so->so_oobmark -= m->m_len; 1820 if (so->so_oobmark == 0) { 1821 sosetstate(so, SS_RCVATMARK); 1822 didoob = 1; 1823 } 1824 } 1825 offset -= m->m_len; 1826 if (sio) { 1827 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1828 sbappend(sio, m); 1829 m = n; 1830 } else { 1831 m = sbunlinkmbuf(&so->so_rcv.sb, 1832 m, &free_chain); 1833 } 1834 } 1835 if (offset) { 1836 KKASSERT(m); 1837 if (sio) { 1838 n = m_copym(m, 0, offset, M_WAITOK); 1839 if (n) 1840 sbappend(sio, n); 1841 } 1842 m->m_data += offset; 1843 m->m_len -= offset; 1844 so->so_rcv.ssb_cc -= offset; 1845 if (so->so_oobmark) { 1846 so->so_oobmark -= offset; 1847 if (so->so_oobmark == 0) { 1848 sosetstate(so, SS_RCVATMARK); 1849 didoob = 1; 1850 } 1851 } 1852 offset = 0; 1853 } 1854 lwkt_reltoken(&so->so_rcv.ssb_token); 1855 } 1856 1857 /* 1858 * If the MSG_WAITALL flag is set (for non-atomic socket), 1859 * we must not quit until resid == 0 or an error termination. 1860 * 1861 * If a signal/timeout occurs, return with a short count but without 1862 * error. 1863 * 1864 * Keep signalsockbuf locked against other readers. 1865 * 1866 * XXX if MSG_PEEK we currently do quit. 1867 */ 1868 if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) && 1869 didoob == 0 && resid > 0 && 1870 !sosendallatonce(so)) { 1871 lwkt_gettoken(&so->so_rcv.ssb_token); 1872 error = 0; 1873 while ((m = so->so_rcv.ssb_mb) == NULL) { 1874 if (so->so_error || (so->so_state & SS_CANTRCVMORE)) { 1875 error = so->so_error; 1876 break; 1877 } 1878 /* 1879 * The window might have closed to zero, make 1880 * sure we send an ack now that we've drained 1881 * the buffer or we might end up blocking until 1882 * the idle takes over (5 seconds). 1883 */ 1884 if (so->so_pcb) 1885 so_pru_rcvd_async(so); 1886 if (so->so_rcv.ssb_mb == NULL) 1887 error = ssb_wait(&so->so_rcv); 1888 if (error) { 1889 lwkt_reltoken(&so->so_rcv.ssb_token); 1890 ssb_unlock(&so->so_rcv); 1891 error = 0; 1892 goto done; 1893 } 1894 } 1895 if (m && error == 0) 1896 goto dontblock; 1897 lwkt_reltoken(&so->so_rcv.ssb_token); 1898 } 1899 1900 /* 1901 * Token not held here. 1902 * 1903 * Cleanup. If an atomic read was requested drop any unread data XXX 1904 */ 1905 if ((flags & MSG_PEEK) == 0) { 1906 if (so->so_pcb) 1907 so_pru_rcvd_async(so); 1908 } 1909 1910 if (orig_resid == resid && orig_resid && 1911 (so->so_state & SS_CANTRCVMORE) == 0) { 1912 ssb_unlock(&so->so_rcv); 1913 goto restart; 1914 } 1915 1916 if (flagsp) 1917 *flagsp |= flags; 1918 release: 1919 ssb_unlock(&so->so_rcv); 1920 done: 1921 if (free_chain) 1922 m_freem(free_chain); 1923 return (error); 1924 } 1925 1926 /* 1927 * Shut a socket down. Note that we do not get a frontend lock as we 1928 * want to be able to shut the socket down even if another thread is 1929 * blocked in a read(), thus waking it up. 
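 *
 * SHUT_RD and SHUT_RDWR flush and disable the receive side locally
 * via sorflush(); SHUT_WR and SHUT_RDWR are additionally passed to
 * the protocol via so_pru_shutdown(), which for a stream protocol
 * typically results in e.g. a FIN being queued.  A hypothetical
 * in-kernel caller closing only its sending direction would do:
 *
 *	error = soshutdown(so, SHUT_WR);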
1930 */ 1931 int 1932 soshutdown(struct socket *so, int how) 1933 { 1934 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1935 return (EINVAL); 1936 1937 if (how != SHUT_WR) { 1938 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1939 sorflush(so); 1940 /*ssb_unlock(&so->so_rcv);*/ 1941 } 1942 if (how != SHUT_RD) 1943 return (so_pru_shutdown(so)); 1944 return (0); 1945 } 1946 1947 void 1948 sorflush(struct socket *so) 1949 { 1950 struct signalsockbuf *ssb = &so->so_rcv; 1951 struct protosw *pr = so->so_proto; 1952 struct signalsockbuf asb; 1953 1954 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1955 1956 lwkt_gettoken(&ssb->ssb_token); 1957 socantrcvmore(so); 1958 asb = *ssb; 1959 1960 /* 1961 * Can't just blow up the ssb structure here 1962 */ 1963 bzero(&ssb->sb, sizeof(ssb->sb)); 1964 ssb->ssb_timeo = 0; 1965 ssb->ssb_lowat = 0; 1966 ssb->ssb_hiwat = 0; 1967 ssb->ssb_mbmax = 0; 1968 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1969 1970 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1971 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1972 ssb_release(&asb, so); 1973 1974 lwkt_reltoken(&ssb->ssb_token); 1975 } 1976 1977 #ifdef INET 1978 static int 1979 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1980 { 1981 struct accept_filter_arg *afap = NULL; 1982 struct accept_filter *afp; 1983 struct so_accf *af = so->so_accf; 1984 int error = 0; 1985 1986 /* do not set/remove accept filters on non listen sockets */ 1987 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1988 error = EINVAL; 1989 goto out; 1990 } 1991 1992 /* removing the filter */ 1993 if (sopt == NULL) { 1994 if (af != NULL) { 1995 if (af->so_accept_filter != NULL && 1996 af->so_accept_filter->accf_destroy != NULL) { 1997 af->so_accept_filter->accf_destroy(so); 1998 } 1999 if (af->so_accept_filter_str != NULL) { 2000 kfree(af->so_accept_filter_str, M_ACCF); 2001 } 2002 kfree(af, M_ACCF); 2003 so->so_accf = NULL; 2004 } 2005 so->so_options &= ~SO_ACCEPTFILTER; 2006 return (0); 2007 } 2008 /* adding a filter */ 2009 /* must remove previous filter first */ 2010 if (af != NULL) { 2011 error = EINVAL; 2012 goto out; 2013 } 2014 /* don't put large objects on the kernel stack */ 2015 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 2016 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 2017 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 2018 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 2019 if (error) 2020 goto out; 2021 afp = accept_filt_get(afap->af_name); 2022 if (afp == NULL) { 2023 error = ENOENT; 2024 goto out; 2025 } 2026 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 2027 if (afp->accf_create != NULL) { 2028 if (afap->af_name[0] != '\0') { 2029 int len = strlen(afap->af_name) + 1; 2030 2031 af->so_accept_filter_str = kmalloc(len, M_ACCF, 2032 M_WAITOK); 2033 strcpy(af->so_accept_filter_str, afap->af_name); 2034 } 2035 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 2036 if (af->so_accept_filter_arg == NULL) { 2037 kfree(af->so_accept_filter_str, M_ACCF); 2038 kfree(af, M_ACCF); 2039 so->so_accf = NULL; 2040 error = EINVAL; 2041 goto out; 2042 } 2043 } 2044 af->so_accept_filter = afp; 2045 so->so_accf = af; 2046 so->so_options |= SO_ACCEPTFILTER; 2047 out: 2048 if (afap != NULL) 2049 kfree(afap, M_TEMP); 2050 return (error); 2051 } 2052 #endif /* INET */ 2053 2054 /* 2055 * Perhaps this routine, and sooptcopyout(), below, ought to come in 2056 * an additional variant to handle the case where the option value needs 2057 * to be some kind of integer, but not a specific 
size. 2058 * In addition to their use here, these functions are also called by the 2059 * protocol-level pr_ctloutput() routines. 2060 */ 2061 int 2062 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2063 { 2064 return soopt_to_kbuf(sopt, buf, len, minlen); 2065 } 2066 2067 int 2068 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2069 { 2070 size_t valsize; 2071 2072 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2073 KKASSERT(kva_p(buf)); 2074 2075 /* 2076 * If the user gives us more than we wanted, we ignore it, 2077 * but if we don't get the minimum length the caller 2078 * wants, we return EINVAL. On success, sopt->sopt_valsize 2079 * is set to however much we actually retrieved. 2080 */ 2081 if ((valsize = sopt->sopt_valsize) < minlen) 2082 return EINVAL; 2083 if (valsize > len) 2084 sopt->sopt_valsize = valsize = len; 2085 2086 bcopy(sopt->sopt_val, buf, valsize); 2087 return 0; 2088 } 2089 2090 2091 int 2092 sosetopt(struct socket *so, struct sockopt *sopt) 2093 { 2094 int error, optval; 2095 struct linger l; 2096 struct timeval tv; 2097 u_long val; 2098 struct signalsockbuf *sotmp; 2099 2100 error = 0; 2101 sopt->sopt_dir = SOPT_SET; 2102 if (sopt->sopt_level != SOL_SOCKET) { 2103 if (so->so_proto && so->so_proto->pr_ctloutput) { 2104 return (so_pr_ctloutput(so, sopt)); 2105 } 2106 error = ENOPROTOOPT; 2107 } else { 2108 switch (sopt->sopt_name) { 2109 #ifdef INET 2110 case SO_ACCEPTFILTER: 2111 error = do_setopt_accept_filter(so, sopt); 2112 if (error) 2113 goto bad; 2114 break; 2115 #endif /* INET */ 2116 case SO_LINGER: 2117 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2118 if (error) 2119 goto bad; 2120 2121 so->so_linger = l.l_linger; 2122 if (l.l_onoff) 2123 so->so_options |= SO_LINGER; 2124 else 2125 so->so_options &= ~SO_LINGER; 2126 break; 2127 2128 case SO_DEBUG: 2129 case SO_KEEPALIVE: 2130 case SO_DONTROUTE: 2131 case SO_USELOOPBACK: 2132 case SO_BROADCAST: 2133 case SO_REUSEADDR: 2134 case SO_REUSEPORT: 2135 case SO_OOBINLINE: 2136 case SO_TIMESTAMP: 2137 case SO_NOSIGPIPE: 2138 error = sooptcopyin(sopt, &optval, sizeof optval, 2139 sizeof optval); 2140 if (error) 2141 goto bad; 2142 if (optval) 2143 so->so_options |= sopt->sopt_name; 2144 else 2145 so->so_options &= ~sopt->sopt_name; 2146 break; 2147 2148 case SO_SNDBUF: 2149 case SO_RCVBUF: 2150 case SO_SNDLOWAT: 2151 case SO_RCVLOWAT: 2152 error = sooptcopyin(sopt, &optval, sizeof optval, 2153 sizeof optval); 2154 if (error) 2155 goto bad; 2156 2157 /* 2158 * Values < 1 make no sense for any of these 2159 * options, so disallow them. 2160 */ 2161 if (optval < 1) { 2162 error = EINVAL; 2163 goto bad; 2164 } 2165 2166 switch (sopt->sopt_name) { 2167 case SO_SNDBUF: 2168 case SO_RCVBUF: 2169 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ? 2170 &so->so_snd : &so->so_rcv, (u_long)optval, 2171 so, 2172 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) { 2173 error = ENOBUFS; 2174 goto bad; 2175 } 2176 sotmp = (sopt->sopt_name == SO_SNDBUF) ? 2177 &so->so_snd : &so->so_rcv; 2178 atomic_clear_int(&sotmp->ssb_flags, 2179 SSB_AUTOSIZE); 2180 break; 2181 2182 /* 2183 * Make sure the low-water is never greater than 2184 * the high-water. 2185 */ 2186 case SO_SNDLOWAT: 2187 so->so_snd.ssb_lowat = 2188 (optval > so->so_snd.ssb_hiwat) ? 2189 so->so_snd.ssb_hiwat : optval; 2190 atomic_clear_int(&so->so_snd.ssb_flags, 2191 SSB_AUTOLOWAT); 2192 break; 2193 case SO_RCVLOWAT: 2194 so->so_rcv.ssb_lowat = 2195 (optval > so->so_rcv.ssb_hiwat) ? 
2196 so->so_rcv.ssb_hiwat : optval; 2197 atomic_clear_int(&so->so_rcv.ssb_flags, 2198 SSB_AUTOLOWAT); 2199 break; 2200 } 2201 break; 2202 2203 case SO_SNDTIMEO: 2204 case SO_RCVTIMEO: 2205 error = sooptcopyin(sopt, &tv, sizeof tv, 2206 sizeof tv); 2207 if (error) 2208 goto bad; 2209 2210 /* assert(hz > 0); */ 2211 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2212 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2213 error = EDOM; 2214 goto bad; 2215 } 2216 /* assert(tick > 0); */ 2217 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2218 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick; 2219 if (val > INT_MAX) { 2220 error = EDOM; 2221 goto bad; 2222 } 2223 if (val == 0 && tv.tv_usec != 0) 2224 val = 1; 2225 2226 switch (sopt->sopt_name) { 2227 case SO_SNDTIMEO: 2228 so->so_snd.ssb_timeo = val; 2229 break; 2230 case SO_RCVTIMEO: 2231 so->so_rcv.ssb_timeo = val; 2232 break; 2233 } 2234 break; 2235 default: 2236 error = ENOPROTOOPT; 2237 break; 2238 } 2239 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { 2240 (void) so_pr_ctloutput(so, sopt); 2241 } 2242 } 2243 bad: 2244 return (error); 2245 } 2246 2247 /* Helper routine for getsockopt */ 2248 int 2249 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2250 { 2251 soopt_from_kbuf(sopt, buf, len); 2252 return 0; 2253 } 2254 2255 void 2256 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len) 2257 { 2258 size_t valsize; 2259 2260 if (len == 0) { 2261 sopt->sopt_valsize = 0; 2262 return; 2263 } 2264 2265 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2266 KKASSERT(kva_p(buf)); 2267 2268 /* 2269 * Documented get behavior is that we always return a value, 2270 * possibly truncated to fit in the user's buffer. 2271 * Traditional behavior is that we always tell the user 2272 * precisely how much we copied, rather than something useful 2273 * like the total amount we had available for her. 2274 * Note that this interface is not idempotent; the entire answer must 2275 * be generated ahead of time.
2276 */ 2277 valsize = szmin(len, sopt->sopt_valsize); 2278 sopt->sopt_valsize = valsize; 2279 if (sopt->sopt_val != 0) { 2280 bcopy(buf, sopt->sopt_val, valsize); 2281 } 2282 } 2283 2284 int 2285 sogetopt(struct socket *so, struct sockopt *sopt) 2286 { 2287 int error, optval; 2288 long optval_l; 2289 struct linger l; 2290 struct timeval tv; 2291 #ifdef INET 2292 struct accept_filter_arg *afap; 2293 #endif 2294 2295 error = 0; 2296 sopt->sopt_dir = SOPT_GET; 2297 if (sopt->sopt_level != SOL_SOCKET) { 2298 if (so->so_proto && so->so_proto->pr_ctloutput) { 2299 return (so_pr_ctloutput(so, sopt)); 2300 } else 2301 return (ENOPROTOOPT); 2302 } else { 2303 switch (sopt->sopt_name) { 2304 #ifdef INET 2305 case SO_ACCEPTFILTER: 2306 if ((so->so_options & SO_ACCEPTCONN) == 0) 2307 return (EINVAL); 2308 afap = kmalloc(sizeof(*afap), M_TEMP, 2309 M_WAITOK | M_ZERO); 2310 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 2311 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 2312 if (so->so_accf->so_accept_filter_str != NULL) 2313 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 2314 } 2315 error = sooptcopyout(sopt, afap, sizeof(*afap)); 2316 kfree(afap, M_TEMP); 2317 break; 2318 #endif /* INET */ 2319 2320 case SO_LINGER: 2321 l.l_onoff = so->so_options & SO_LINGER; 2322 l.l_linger = so->so_linger; 2323 error = sooptcopyout(sopt, &l, sizeof l); 2324 break; 2325 2326 case SO_USELOOPBACK: 2327 case SO_DONTROUTE: 2328 case SO_DEBUG: 2329 case SO_KEEPALIVE: 2330 case SO_REUSEADDR: 2331 case SO_REUSEPORT: 2332 case SO_BROADCAST: 2333 case SO_OOBINLINE: 2334 case SO_TIMESTAMP: 2335 case SO_NOSIGPIPE: 2336 optval = so->so_options & sopt->sopt_name; 2337 integer: 2338 error = sooptcopyout(sopt, &optval, sizeof optval); 2339 break; 2340 2341 case SO_TYPE: 2342 optval = so->so_type; 2343 goto integer; 2344 2345 case SO_ERROR: 2346 optval = so->so_error; 2347 so->so_error = 0; 2348 goto integer; 2349 2350 case SO_SNDBUF: 2351 optval = so->so_snd.ssb_hiwat; 2352 goto integer; 2353 2354 case SO_RCVBUF: 2355 optval = so->so_rcv.ssb_hiwat; 2356 goto integer; 2357 2358 case SO_SNDLOWAT: 2359 optval = so->so_snd.ssb_lowat; 2360 goto integer; 2361 2362 case SO_RCVLOWAT: 2363 optval = so->so_rcv.ssb_lowat; 2364 goto integer; 2365 2366 case SO_SNDTIMEO: 2367 case SO_RCVTIMEO: 2368 optval = (sopt->sopt_name == SO_SNDTIMEO ? 2369 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo); 2370 2371 tv.tv_sec = optval / hz; 2372 tv.tv_usec = (optval % hz) * ustick; 2373 error = sooptcopyout(sopt, &tv, sizeof tv); 2374 break; 2375 2376 case SO_SNDSPACE: 2377 optval_l = ssb_space(&so->so_snd); 2378 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l)); 2379 break; 2380 2381 case SO_CPUHINT: 2382 optval = -1; /* no hint */ 2383 goto integer; 2384 2385 default: 2386 error = ENOPROTOOPT; 2387 break; 2388 } 2389 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) 2390 so_pr_ctloutput(so, sopt); 2391 return (error); 2392 } 2393 } 2394 2395 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2396 int 2397 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2398 { 2399 struct mbuf *m, *m_prev; 2400 int sopt_size = sopt->sopt_valsize, msize; 2401 2402 m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA, 2403 0, &msize); 2404 if (m == NULL) 2405 return (ENOBUFS); 2406 m->m_len = min(msize, sopt_size); 2407 sopt_size -= m->m_len; 2408 *mp = m; 2409 m_prev = m; 2410 2411 while (sopt_size > 0) { 2412 m = m_getl(sopt_size, sopt->sopt_td ? 
M_WAITOK : M_NOWAIT, 2413 MT_DATA, 0, &msize); 2414 if (m == NULL) { 2415 m_freem(*mp); 2416 return (ENOBUFS); 2417 } 2418 m->m_len = min(msize, sopt_size); 2419 sopt_size -= m->m_len; 2420 m_prev->m_next = m; 2421 m_prev = m; 2422 } 2423 return (0); 2424 } 2425 2426 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */ 2427 int 2428 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2429 { 2430 soopt_to_mbuf(sopt, m); 2431 return 0; 2432 } 2433 2434 void 2435 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m) 2436 { 2437 size_t valsize; 2438 void *val; 2439 2440 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2441 KKASSERT(kva_p(m)); 2442 if (sopt->sopt_val == NULL) 2443 return; 2444 val = sopt->sopt_val; 2445 valsize = sopt->sopt_valsize; 2446 while (m != NULL && valsize >= m->m_len) { 2447 bcopy(val, mtod(m, char *), m->m_len); 2448 valsize -= m->m_len; 2449 val = (caddr_t)val + m->m_len; 2450 m = m->m_next; 2451 } 2452 if (m != NULL) /* should be allocated large enough at ip6_sooptmcopyin() */ 2453 panic("ip6_sooptmcopyin"); 2454 } 2455 2456 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2457 int 2458 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2459 { 2460 return soopt_from_mbuf(sopt, m); 2461 } 2462 2463 int 2464 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m) 2465 { 2466 struct mbuf *m0 = m; 2467 size_t valsize = 0; 2468 size_t maxsize; 2469 void *val; 2470 2471 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2472 KKASSERT(kva_p(m)); 2473 if (sopt->sopt_val == NULL) 2474 return 0; 2475 val = sopt->sopt_val; 2476 maxsize = sopt->sopt_valsize; 2477 while (m != NULL && maxsize >= m->m_len) { 2478 bcopy(mtod(m, char *), val, m->m_len); 2479 maxsize -= m->m_len; 2480 val = (caddr_t)val + m->m_len; 2481 valsize += m->m_len; 2482 m = m->m_next; 2483 } 2484 if (m != NULL) { 2485 /* a large enough sockopt buffer should be given from user-land */ 2486 m_freem(m0); 2487 return (EINVAL); 2488 } 2489 sopt->sopt_valsize = valsize; 2490 return 0; 2491 } 2492 2493 void 2494 sohasoutofband(struct socket *so) 2495 { 2496 if (so->so_sigio != NULL) 2497 pgsigio(so->so_sigio, SIGURG, 0); 2498 /* 2499 * NOTE: 2500 * There is no need to use NOTE_OOB as KNOTE hint here: 2501 * soread filter depends on so_oobmark and SS_RCVATMARK 2502 * in so_state. NOTE_OOB would cause an unnecessary penalty 2503 * in KNOTE, if there was knote processing contention.
2504 */ 2505 KNOTE(&so->so_rcv.ssb_kq.ki_note, 0); 2506 } 2507 2508 int 2509 sokqfilter(struct file *fp, struct knote *kn) 2510 { 2511 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2512 struct signalsockbuf *ssb; 2513 2514 switch (kn->kn_filter) { 2515 case EVFILT_READ: 2516 if (so->so_options & SO_ACCEPTCONN) 2517 kn->kn_fop = &solisten_filtops; 2518 else 2519 kn->kn_fop = &soread_filtops; 2520 ssb = &so->so_rcv; 2521 break; 2522 case EVFILT_WRITE: 2523 kn->kn_fop = &sowrite_filtops; 2524 ssb = &so->so_snd; 2525 break; 2526 case EVFILT_EXCEPT: 2527 kn->kn_fop = &soexcept_filtops; 2528 ssb = &so->so_rcv; 2529 break; 2530 default: 2531 return (EOPNOTSUPP); 2532 } 2533 2534 knote_insert(&ssb->ssb_kq.ki_note, kn); 2535 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE); 2536 return (0); 2537 } 2538 2539 static void 2540 filt_sordetach(struct knote *kn) 2541 { 2542 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2543 2544 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn); 2545 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note)) 2546 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE); 2547 } 2548 2549 /*ARGSUSED*/ 2550 static int 2551 filt_soread(struct knote *kn, long hint __unused) 2552 { 2553 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2554 2555 if (kn->kn_sfflags & NOTE_OOB) { 2556 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) { 2557 kn->kn_fflags |= NOTE_OOB; 2558 return (1); 2559 } 2560 return (0); 2561 } 2562 kn->kn_data = so->so_rcv.ssb_cc; 2563 2564 if (so->so_state & SS_CANTRCVMORE) { 2565 /* 2566 * Only set NODATA if all data has been exhausted. 2567 */ 2568 if (kn->kn_data == 0) 2569 kn->kn_flags |= EV_NODATA; 2570 kn->kn_flags |= EV_EOF; 2571 kn->kn_fflags = so->so_error; 2572 return (1); 2573 } 2574 if (so->so_error) /* temporary udp error */ 2575 return (1); 2576 if (kn->kn_sfflags & NOTE_LOWAT) 2577 return (kn->kn_data >= kn->kn_sdata); 2578 return ((kn->kn_data >= so->so_rcv.ssb_lowat) || 2579 !TAILQ_EMPTY(&so->so_comp)); 2580 } 2581 2582 static void 2583 filt_sowdetach(struct knote *kn) 2584 { 2585 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2586 2587 knote_remove(&so->so_snd.ssb_kq.ki_note, kn); 2588 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note)) 2589 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE); 2590 } 2591 2592 /*ARGSUSED*/ 2593 static int 2594 filt_sowrite(struct knote *kn, long hint __unused) 2595 { 2596 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2597 2598 if (so->so_snd.ssb_flags & SSB_PREALLOC) 2599 kn->kn_data = ssb_space_prealloc(&so->so_snd); 2600 else 2601 kn->kn_data = ssb_space(&so->so_snd); 2602 2603 if (so->so_state & SS_CANTSENDMORE) { 2604 kn->kn_flags |= (EV_EOF | EV_NODATA); 2605 kn->kn_fflags = so->so_error; 2606 return (1); 2607 } 2608 if (so->so_error) /* temporary udp error */ 2609 return (1); 2610 if (((so->so_state & SS_ISCONNECTED) == 0) && 2611 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2612 return (0); 2613 if (kn->kn_sfflags & NOTE_LOWAT) 2614 return (kn->kn_data >= kn->kn_sdata); 2615 return (kn->kn_data >= so->so_snd.ssb_lowat); 2616 } 2617 2618 /*ARGSUSED*/ 2619 static int 2620 filt_solisten(struct knote *kn, long hint __unused) 2621 { 2622 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2623 int qlen = so->so_qlen; 2624 2625 if (soavailconn > 0 && qlen > soavailconn) 2626 qlen = soavailconn; 2627 kn->kn_data = qlen; 2628 2629 return (!TAILQ_EMPTY(&so->so_comp)); 2630 } 2631