/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void filt_sordetach(struct knote *kn);
static int filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);

static int soclose_sync(struct socket *so, int fflag);
static void soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");


static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
		    MSGF_DROPABLE | MSGF_PRIORITY,
		    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

#ifdef INVARIANTS
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}
#endif

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}

/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
    boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
				    top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			    (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize(m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
				    m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
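
/*
 * Illustrative sketch (an assumed handler, not one defined in this file):
 * a protocol-level pr_ctloutput() routine typically pulls a fixed-size
 * integer option into kernel space with sooptcopyin()/soopt_to_kbuf()
 * before acting on it, e.g.
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *
 * Passing the same value for len and minlen rejects short user buffers
 * with EINVAL while silently ignoring any excess length.
 */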

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}
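
/*
 * Illustrative sketch (assumption): the get side mirrors the set side.
 * A handler answering a getsockopt(2) request copies its result back with
 * sooptcopyout()/soopt_from_kbuf(), which truncates to the user's buffer
 * and updates sopt_valsize to the amount actually returned, e.g.
 *
 *	int optval;
 *
 *	optval = <current value of the option>;
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 *	return (error);
 */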

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
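
/*
 * Illustrative user-land counterpart (sketch only): the SOL_SOCKET cases
 * handled by sogetopt() above are reached through getsockopt(2), e.g.
 *
 *	struct linger l;
 *	socklen_t len = sizeof(l);
 *
 *	if (getsockopt(s, SOL_SOCKET, SO_LINGER, &l, &len) == 0)
 *		printf("linger %s, %d s\n", l.l_onoff ? "on" : "off",
 *		    l.l_linger);
 *
 * Non-SOL_SOCKET levels are passed to the protocol's pr_ctloutput()
 * instead.
 */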

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* enough mbufs should have been allocated by ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the user-land buffer should have been large enough */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
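
/*
 * Illustrative sketch (an assumption about the typical caller): the mbuf
 * bridge helpers above are paired by legacy ctloutput paths that still
 * want the option data in an mbuf chain rather than a sockopt buffer:
 *
 *	struct mbuf *m = NULL;
 *
 *	error = soopt_getm(sopt, &m);		(size a chain to sopt_valsize)
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	(copy the option data into it)
 *
 * On the get side, soopt_mcopyout() walks the protocol's reply chain back
 * into the sockopt buffer, returning EINVAL (and freeing the chain) when
 * the user buffer is too small.
 */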

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as the KNOTE hint here:
	 * the soread filter depends on so_oobmark and the SS_RCVATMARK
	 * bit of so_state.  NOTE_OOB would cause an unnecessary penalty
	 * in KNOTE if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}
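
/*
 * Illustrative user-land sketch (not part of this file): the filters above
 * back kevent(2) registrations on sockets.  For example, waking up only
 * once at least 4 KB is readable uses NOTE_LOWAT, which filt_soread()
 * checks against kn_sdata:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 4096, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
 *		err(1, "kevent");
 *
 * A listening socket registered with EVFILT_READ is routed to
 * filt_solisten() instead, and kn_data then reports the completed
 * connection backlog (clamped to the kern.ipc.soavailconn sysctl).
 */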