1 /* 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");

/*
 * Socket operation routines.
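 * (An aside on the tunables defined above: they are exported under the
 * kern.ipc sysctl tree, so the listen backlog clamp, for example, can
 * normally be raised at run time with "sysctl kern.ipc.somaxconn=1024";
 * the value shown is purely illustrative.)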
160 * These routines are called by the routines in 161 * sys_socket.c or from a system process, and 162 * implement the semantics of socket operations by 163 * switching out to the protocol specific routines. 164 */ 165 166 /* 167 * Get a socket structure, and initialize it. 168 * Note that it would probably be better to allocate socket 169 * and PCB at the same time, but I'm not convinced that all 170 * the protocols can be easily modified to do this. 171 */ 172 struct socket * 173 soalloc(int waitok, struct protosw *pr) 174 { 175 globaldata_t gd = mycpu; 176 struct socket *so; 177 unsigned waitmask; 178 179 waitmask = waitok ? M_WAITOK : M_NOWAIT; 180 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 181 if (so) { 182 /* XXX race condition for reentrant kernel */ 183 so->so_proto = pr; 184 TAILQ_INIT(&so->so_aiojobq); 185 TAILQ_INIT(&so->so_rcv.ssb_mlist); 186 TAILQ_INIT(&so->so_snd.ssb_mlist); 187 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 188 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 189 spin_init(&so->so_rcvd_spin, "soalloc"); 190 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 191 MSGF_DROPABLE | MSGF_PRIORITY, 192 so->so_proto->pr_usrreqs->pru_rcvd); 193 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 194 so->so_state = SS_NOFDREF; 195 so->so_refs = 1; 196 so->so_inum = gd->gd_anoninum++ * ncpus + gd->gd_cpuid + 2; 197 } 198 return so; 199 } 200 201 int 202 socreate(int dom, struct socket **aso, int type, 203 int proto, struct thread *td) 204 { 205 struct proc *p = td->td_proc; 206 struct protosw *prp; 207 struct socket *so; 208 struct pru_attach_info ai; 209 int error; 210 211 if (proto) 212 prp = pffindproto(dom, proto, type); 213 else 214 prp = pffindtype(dom, type); 215 216 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 217 return (EPROTONOSUPPORT); 218 219 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 220 prp->pr_domain->dom_family != PF_LOCAL && 221 prp->pr_domain->dom_family != PF_INET && 222 prp->pr_domain->dom_family != PF_INET6 && 223 prp->pr_domain->dom_family != PF_ROUTE) { 224 return (EPROTONOSUPPORT); 225 } 226 227 if (prp->pr_type != type) 228 return (EPROTOTYPE); 229 so = soalloc(p != NULL, prp); 230 if (so == NULL) 231 return (ENOBUFS); 232 233 /* 234 * Callers of socreate() presumably will connect up a descriptor 235 * and call soclose() if they cannot. This represents our so_refs 236 * (which should be 1) from soalloc(). 237 */ 238 soclrstate(so, SS_NOFDREF); 239 240 /* 241 * Set a default port for protocol processing. No action will occur 242 * on the socket on this port until an inpcb is attached to it and 243 * is able to match incoming packets, or until the socket becomes 244 * available to userland. 245 * 246 * We normally default the socket to the protocol thread on cpu 0, 247 * if protocol does not provide its own method to initialize the 248 * default port. 249 * 250 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 251 * thread and all pr_*()/pru_*() calls are executed synchronously. 
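	 *
	 * (For illustration, an in-kernel consumer of this interface goes
	 * through roughly the following sequence; the names and constants
	 * are only an example:
	 *
	 *	struct socket *so;
	 *	int error;
	 *
	 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	 *			 curthread);
	 *	if (error == 0) {
	 *		error = sobind(so, sa, curthread);
	 *		if (error == 0)
	 *			error = solisten(so, SOMAXCONN, curthread);
	 *		...
	 *		soclose(so, 0);
	 *	}
	 *
	 * The socket returned by socreate() is referenced; soclose() is
	 * what eventually drops that reference.)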
252 */ 253 if (prp->pr_flags & PR_SYNC_PORT) 254 so->so_port = &netisr_sync_port; 255 else if (prp->pr_initport != NULL) 256 so->so_port = prp->pr_initport(); 257 else 258 so->so_port = netisr_cpuport(0); 259 260 TAILQ_INIT(&so->so_incomp); 261 TAILQ_INIT(&so->so_comp); 262 so->so_type = type; 263 so->so_cred = crhold(p->p_ucred); 264 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 265 ai.p_ucred = p->p_ucred; 266 ai.fd_rdir = p->p_fd->fd_rdir; 267 268 /* 269 * Auto-sizing of socket buffers is managed by the protocols and 270 * the appropriate flags must be set in the pru_attach function. 271 */ 272 if (use_socreate_fast && prp->pr_usrreqs->pru_preattach) 273 error = so_pru_attach_fast(so, proto, &ai); 274 else 275 error = so_pru_attach(so, proto, &ai); 276 if (error) { 277 sosetstate(so, SS_NOFDREF); 278 sofree(so); /* from soalloc */ 279 return error; 280 } 281 282 /* 283 * NOTE: Returns referenced socket. 284 */ 285 *aso = so; 286 return (0); 287 } 288 289 int 290 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 291 { 292 int error; 293 294 error = so_pru_bind(so, nam, td); 295 return (error); 296 } 297 298 static void 299 sodealloc(struct socket *so) 300 { 301 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0); 302 303 #ifdef INVARIANTS 304 if (so->so_options & SO_ACCEPTCONN) { 305 KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty")); 306 KASSERT(TAILQ_EMPTY(&so->so_incomp), 307 ("so_incomp is not empty")); 308 } 309 #endif 310 311 if (so->so_rcv.ssb_hiwat) 312 (void)chgsbsize(so->so_cred->cr_uidinfo, 313 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 314 if (so->so_snd.ssb_hiwat) 315 (void)chgsbsize(so->so_cred->cr_uidinfo, 316 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 317 #ifdef INET 318 /* remove accept filter if present */ 319 if (so->so_accf != NULL) 320 do_setopt_accept_filter(so, NULL); 321 #endif /* INET */ 322 crfree(so->so_cred); 323 if (so->so_faddr != NULL) 324 kfree(so->so_faddr, M_SONAME); 325 kfree(so, M_SOCKET); 326 } 327 328 int 329 solisten(struct socket *so, int backlog, struct thread *td) 330 { 331 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 332 return (EINVAL); 333 334 lwkt_gettoken(&so->so_rcv.ssb_token); 335 if (TAILQ_EMPTY(&so->so_comp)) 336 so->so_options |= SO_ACCEPTCONN; 337 lwkt_reltoken(&so->so_rcv.ssb_token); 338 if (backlog < 0 || backlog > somaxconn) 339 backlog = somaxconn; 340 so->so_qlimit = backlog; 341 return so_pru_listen(so, td); 342 } 343 344 static void 345 soqflush(struct socket *so) 346 { 347 lwkt_getpooltoken(so); 348 if (so->so_options & SO_ACCEPTCONN) { 349 struct socket *sp; 350 351 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 352 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == 353 SS_INCOMP); 354 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 355 so->so_incqlen--; 356 soclrstate(sp, SS_INCOMP); 357 soabort_async(sp, TRUE); 358 } 359 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 360 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == 361 SS_COMP); 362 TAILQ_REMOVE(&so->so_comp, sp, so_list); 363 so->so_qlen--; 364 soclrstate(sp, SS_COMP); 365 soabort_async(sp, TRUE); 366 } 367 } 368 lwkt_relpooltoken(so); 369 } 370 371 /* 372 * Destroy a disconnected socket. This routine is a NOP if entities 373 * still have a reference on the socket: 374 * 375 * so_pcb - The protocol stack still has a reference 376 * SS_NOFDREF - There is no longer a file pointer reference 377 */ 378 void 379 sofree(struct socket *so) 380 { 381 struct socket *head; 382 383 /* 384 * This is a bit hackish at the moment. 
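	 * (As the function comment above notes, teardown only happens once
	 * nothing else holds the socket: for example, a TCP socket whose
	 * descriptor has been closed but whose protocol control block still
	 * exists stays allocated until the protocol detaches and makes the
	 * final sofree() call.)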
We need to interlock 385 * any accept queue we are on before we potentially lose the 386 * last reference to avoid races against a re-reference from 387 * someone operating on the queue. 388 */ 389 while ((head = so->so_head) != NULL) { 390 lwkt_getpooltoken(head); 391 if (so->so_head == head) 392 break; 393 lwkt_relpooltoken(head); 394 } 395 396 /* 397 * Arbitrage the last free. 398 */ 399 KKASSERT(so->so_refs > 0); 400 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 401 if (head) 402 lwkt_relpooltoken(head); 403 return; 404 } 405 406 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 407 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 408 409 if (head != NULL) { 410 /* 411 * We're done, remove ourselves from the accept queue we are 412 * on, if we are on one. 413 */ 414 if (so->so_state & SS_INCOMP) { 415 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 416 SS_INCOMP); 417 TAILQ_REMOVE(&head->so_incomp, so, so_list); 418 head->so_incqlen--; 419 } else if (so->so_state & SS_COMP) { 420 /* 421 * We must not decommission a socket that's 422 * on the accept(2) queue. If we do, then 423 * accept(2) may hang after select(2) indicated 424 * that the listening socket was ready. 425 */ 426 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 427 SS_COMP); 428 lwkt_relpooltoken(head); 429 return; 430 } else { 431 panic("sofree: not queued"); 432 } 433 soclrstate(so, SS_INCOMP); 434 so->so_head = NULL; 435 lwkt_relpooltoken(head); 436 } else { 437 /* Flush accept queues, if we are accepting. */ 438 soqflush(so); 439 } 440 ssb_release(&so->so_snd, so); 441 sorflush(so); 442 sodealloc(so); 443 } 444 445 /* 446 * Close a socket on last file table reference removal. 447 * Initiate disconnect if connected. 448 * Free socket when disconnect complete. 449 */ 450 int 451 soclose(struct socket *so, int fflag) 452 { 453 int error; 454 455 funsetown(&so->so_sigio); 456 sosetstate(so, SS_ISCLOSING); 457 if (!use_soclose_fast || 458 (so->so_proto->pr_flags & PR_SYNC_PORT) || 459 ((so->so_state & SS_ISCONNECTED) && 460 (so->so_options & SO_LINGER) && 461 so->so_linger != 0)) { 462 error = soclose_sync(so, fflag); 463 } else { 464 soclose_fast(so); 465 error = 0; 466 } 467 return error; 468 } 469 470 void 471 sodiscard(struct socket *so) 472 { 473 if (so->so_state & SS_NOFDREF) 474 panic("soclose: NOFDREF"); 475 sosetstate(so, SS_NOFDREF); /* take ref */ 476 } 477 478 /* 479 * Append the completed queue of head to head_inh (inherting listen socket). 480 */ 481 void 482 soinherit(struct socket *head, struct socket *head_inh) 483 { 484 boolean_t do_wakeup = FALSE; 485 486 KASSERT(head->so_options & SO_ACCEPTCONN, 487 ("head does not accept connection")); 488 KASSERT(head_inh->so_options & SO_ACCEPTCONN, 489 ("head_inh does not accept connection")); 490 491 lwkt_getpooltoken(head); 492 lwkt_getpooltoken(head_inh); 493 494 if (head->so_qlen > 0) 495 do_wakeup = TRUE; 496 497 while (!TAILQ_EMPTY(&head->so_comp)) { 498 struct ucred *old_cr; 499 struct socket *sp; 500 501 sp = TAILQ_FIRST(&head->so_comp); 502 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP); 503 504 /* 505 * Remove this socket from the current listen socket 506 * completed queue. 507 */ 508 TAILQ_REMOVE(&head->so_comp, sp, so_list); 509 head->so_qlen--; 510 511 /* Save the old ucred for later free. */ 512 old_cr = sp->so_cred; 513 514 /* 515 * Install this socket to the inheriting listen socket 516 * completed queue. 
517 */ 518 sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */ 519 sp->so_head = head_inh; 520 521 TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list); 522 head_inh->so_qlen++; 523 524 /* 525 * NOTE: 526 * crfree() may block and release the tokens temporarily. 527 * However, we are fine here, since the transition is done. 528 */ 529 crfree(old_cr); 530 } 531 532 lwkt_relpooltoken(head_inh); 533 lwkt_relpooltoken(head); 534 535 if (do_wakeup) { 536 /* 537 * "New" connections have arrived 538 */ 539 sorwakeup(head_inh); 540 wakeup(&head_inh->so_timeo); 541 } 542 } 543 544 static int 545 soclose_sync(struct socket *so, int fflag) 546 { 547 int error = 0; 548 549 if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0) 550 so_pru_sync(so); /* unpend async prus */ 551 552 if (so->so_pcb == NULL) 553 goto discard; 554 555 if (so->so_state & SS_ISCONNECTED) { 556 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 557 error = sodisconnect(so); 558 if (error) 559 goto drop; 560 } 561 if (so->so_options & SO_LINGER) { 562 if ((so->so_state & SS_ISDISCONNECTING) && 563 (fflag & FNONBLOCK)) 564 goto drop; 565 while (so->so_state & SS_ISCONNECTED) { 566 error = tsleep(&so->so_timeo, PCATCH, 567 "soclos", so->so_linger * hz); 568 if (error) 569 break; 570 } 571 } 572 } 573 drop: 574 if (so->so_pcb) { 575 int error2; 576 577 error2 = so_pru_detach(so); 578 if (error2 == EJUSTRETURN) { 579 /* 580 * Protocol will call sodiscard() 581 * and sofree() for us. 582 */ 583 return error; 584 } 585 if (error == 0) 586 error = error2; 587 } 588 discard: 589 sodiscard(so); 590 sofree(so); /* dispose of ref */ 591 592 return (error); 593 } 594 595 static void 596 soclose_fast_handler(netmsg_t msg) 597 { 598 struct socket *so = msg->base.nm_so; 599 600 if (so->so_pcb == NULL) 601 goto discard; 602 603 if ((so->so_state & SS_ISCONNECTED) && 604 (so->so_state & SS_ISDISCONNECTING) == 0) 605 so_pru_disconnect_direct(so); 606 607 if (so->so_pcb) { 608 int error; 609 610 error = so_pru_detach_direct(so); 611 if (error == EJUSTRETURN) { 612 /* 613 * Protocol will call sodiscard() 614 * and sofree() for us. 615 */ 616 return; 617 } 618 } 619 discard: 620 sodiscard(so); 621 sofree(so); 622 } 623 624 static void 625 soclose_fast(struct socket *so) 626 { 627 struct netmsg_base *base = &so->so_clomsg; 628 629 netmsg_init(base, so, &netisr_apanic_rport, 0, 630 soclose_fast_handler); 631 if (so->so_port == netisr_curport()) 632 lwkt_sendmsg_oncpu(so->so_port, &base->lmsg); 633 else 634 lwkt_sendmsg(so->so_port, &base->lmsg); 635 } 636 637 /* 638 * Abort and destroy a socket. Only one abort can be in progress 639 * at any given moment. 640 */ 641 void 642 soabort_async(struct socket *so, boolean_t clr_head) 643 { 644 /* 645 * Keep a reference before clearing the so_head 646 * to avoid racing socket close in netisr. 647 */ 648 soreference(so); 649 if (clr_head) 650 so->so_head = NULL; 651 so_pru_abort_async(so); 652 } 653 654 void 655 soabort_direct(struct socket *so) 656 { 657 soreference(so); 658 so_pru_abort_direct(so); 659 } 660 661 /* 662 * so is passed in ref'd, which becomes owned by 663 * the cleared SS_NOFDREF flag. 
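 * In other words, the convention in this file is that a socket with
 * SS_NOFDREF clear is held by a file descriptor reference: sodiscard()
 * sets the flag (taking that reference back) when the descriptor goes
 * away, and soaccept_generic() clears it when the socket is handed to a
 * new descriptor by accept(2).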
664 */ 665 void 666 soaccept_generic(struct socket *so) 667 { 668 if ((so->so_state & SS_NOFDREF) == 0) 669 panic("soaccept: !NOFDREF"); 670 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 671 } 672 673 int 674 soaccept(struct socket *so, struct sockaddr **nam) 675 { 676 int error; 677 678 soaccept_generic(so); 679 error = so_pru_accept(so, nam); 680 return (error); 681 } 682 683 int 684 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td, 685 boolean_t sync) 686 { 687 int error; 688 689 if (so->so_options & SO_ACCEPTCONN) 690 return (EOPNOTSUPP); 691 /* 692 * If protocol is connection-based, can only connect once. 693 * Otherwise, if connected, try to disconnect first. 694 * This allows user to disconnect by connecting to, e.g., 695 * a null address. 696 */ 697 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 698 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 699 (error = sodisconnect(so)))) { 700 error = EISCONN; 701 } else { 702 /* 703 * Prevent accumulated error from previous connection 704 * from biting us. 705 */ 706 so->so_error = 0; 707 if (!sync && so->so_proto->pr_usrreqs->pru_preconnect) 708 error = so_pru_connect_async(so, nam, td); 709 else 710 error = so_pru_connect(so, nam, td); 711 } 712 return (error); 713 } 714 715 int 716 soconnect2(struct socket *so1, struct socket *so2) 717 { 718 int error; 719 720 error = so_pru_connect2(so1, so2); 721 return (error); 722 } 723 724 int 725 sodisconnect(struct socket *so) 726 { 727 int error; 728 729 if ((so->so_state & SS_ISCONNECTED) == 0) { 730 error = ENOTCONN; 731 goto bad; 732 } 733 if (so->so_state & SS_ISDISCONNECTING) { 734 error = EALREADY; 735 goto bad; 736 } 737 error = so_pru_disconnect(so); 738 bad: 739 return (error); 740 } 741 742 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 743 /* 744 * Send on a socket. 745 * If send must go all at once and message is larger than 746 * send buffering, then hard error. 747 * Lock against other senders. 748 * If must go all at once and not enough room now, then 749 * inform user that this would block and do nothing. 750 * Otherwise, if nonblocking, send as much as possible. 751 * The data to be sent is described by "uio" if nonzero, 752 * otherwise by the mbuf chain "top" (which must be null 753 * if uio is not). Data provided in mbuf chain must be small 754 * enough to send all at once. 755 * 756 * Returns nonzero on error, timeout or signal; callers 757 * must check for short counts if EINTR/ERESTART are returned. 758 * Data and control buffers are freed on return. 759 */ 760 int 761 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 762 struct mbuf *top, struct mbuf *control, int flags, 763 struct thread *td) 764 { 765 struct mbuf **mp; 766 struct mbuf *m; 767 size_t resid; 768 int space, len; 769 int clen = 0, error, dontroute, mlen; 770 int atomic = sosendallatonce(so) || top; 771 int pru_flags; 772 773 if (uio) { 774 resid = uio->uio_resid; 775 } else { 776 resid = (size_t)top->m_pkthdr.len; 777 #ifdef INVARIANTS 778 len = 0; 779 for (m = top; m; m = m->m_next) 780 len += m->m_len; 781 KKASSERT(top->m_pkthdr.len == len); 782 #endif 783 } 784 785 /* 786 * WARNING! resid is unsigned, space and len are signed. space 787 * can wind up negative if the sockbuf is overcommitted. 788 * 789 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 790 * type sockets since that's an error. 
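	 *
	 * (A note for callers, per the function comment above: on
	 * EINTR/ERESTART the transfer may be partial.  One illustrative way
	 * to account for that with a uio-based send is:
	 *
	 *	size_t before = auio.uio_resid;
	 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
	 *	done = before - auio.uio_resid;		amount consumed
	 *
	 * where "auio", "done" and "td" are illustrative names.)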
791 */ 792 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 793 error = EINVAL; 794 goto out; 795 } 796 797 dontroute = 798 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 799 (so->so_proto->pr_flags & PR_ATOMIC); 800 if (td->td_lwp != NULL) 801 td->td_lwp->lwp_ru.ru_msgsnd++; 802 if (control) 803 clen = control->m_len; 804 #define gotoerr(errcode) { error = errcode; goto release; } 805 806 restart: 807 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 808 if (error) 809 goto out; 810 811 do { 812 if (so->so_state & SS_CANTSENDMORE) 813 gotoerr(EPIPE); 814 if (so->so_error) { 815 error = so->so_error; 816 so->so_error = 0; 817 goto release; 818 } 819 if ((so->so_state & SS_ISCONNECTED) == 0) { 820 /* 821 * `sendto' and `sendmsg' is allowed on a connection- 822 * based socket if it supports implied connect. 823 * Return ENOTCONN if not connected and no address is 824 * supplied. 825 */ 826 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 827 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 828 if ((so->so_state & SS_ISCONFIRMING) == 0 && 829 !(resid == 0 && clen != 0)) 830 gotoerr(ENOTCONN); 831 } else if (addr == NULL) 832 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 833 ENOTCONN : EDESTADDRREQ); 834 } 835 if ((atomic && resid > so->so_snd.ssb_hiwat) || 836 clen > so->so_snd.ssb_hiwat) { 837 gotoerr(EMSGSIZE); 838 } 839 space = ssb_space(&so->so_snd); 840 if (flags & MSG_OOB) 841 space += 1024; 842 if ((space < 0 || (size_t)space < resid + clen) && uio && 843 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 844 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 845 gotoerr(EWOULDBLOCK); 846 ssb_unlock(&so->so_snd); 847 error = ssb_wait(&so->so_snd); 848 if (error) 849 goto out; 850 goto restart; 851 } 852 mp = ⊤ 853 space -= clen; 854 do { 855 if (uio == NULL) { 856 /* 857 * Data is prepackaged in "top". 858 */ 859 resid = 0; 860 if (flags & MSG_EOR) 861 top->m_flags |= M_EOR; 862 } else do { 863 if (resid > INT_MAX) 864 resid = INT_MAX; 865 m = m_getl((int)resid, M_WAITOK, MT_DATA, 866 top == NULL ? M_PKTHDR : 0, &mlen); 867 if (top == NULL) { 868 m->m_pkthdr.len = 0; 869 m->m_pkthdr.rcvif = NULL; 870 } 871 len = imin((int)szmin(mlen, resid), space); 872 if (resid < MINCLSIZE) { 873 /* 874 * For datagram protocols, leave room 875 * for protocol headers in first mbuf. 876 */ 877 if (atomic && top == NULL && len < mlen) 878 MH_ALIGN(m, len); 879 } 880 space -= len; 881 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 882 resid = uio->uio_resid; 883 m->m_len = len; 884 *mp = m; 885 top->m_pkthdr.len += len; 886 if (error) 887 goto release; 888 mp = &m->m_next; 889 if (resid == 0) { 890 if (flags & MSG_EOR) 891 top->m_flags |= M_EOR; 892 break; 893 } 894 } while (space > 0 && atomic); 895 if (dontroute) 896 so->so_options |= SO_DONTROUTE; 897 if (flags & MSG_OOB) { 898 pru_flags = PRUS_OOB; 899 } else if ((flags & MSG_EOF) && 900 (so->so_proto->pr_flags & PR_IMPLOPCL) && 901 (resid == 0)) { 902 /* 903 * If the user set MSG_EOF, the protocol 904 * understands this flag and nothing left to 905 * send then use PRU_SEND_EOF instead of PRU_SEND. 906 */ 907 pru_flags = PRUS_EOF; 908 } else if (resid > 0 && space > 0) { 909 /* If there is more to send, set PRUS_MORETOCOME */ 910 pru_flags = PRUS_MORETOCOME; 911 } else { 912 pru_flags = 0; 913 } 914 /* 915 * XXX all the SS_CANTSENDMORE checks previously 916 * done could be out of date. We could have recieved 917 * a reset packet in an interrupt or maybe we slept 918 * while doing page faults in uiomove() etc. 
We could 919 * probably recheck again inside the splnet() protection 920 * here, but there are probably other places that this 921 * also happens. We must rethink this. 922 */ 923 error = so_pru_send(so, pru_flags, top, addr, control, td); 924 if (dontroute) 925 so->so_options &= ~SO_DONTROUTE; 926 clen = 0; 927 control = NULL; 928 top = NULL; 929 mp = ⊤ 930 if (error) 931 goto release; 932 } while (resid && space > 0); 933 } while (resid); 934 935 release: 936 ssb_unlock(&so->so_snd); 937 out: 938 if (top) 939 m_freem(top); 940 if (control) 941 m_freem(control); 942 return (error); 943 } 944 945 #ifdef INET 946 /* 947 * A specialization of sosend() for UDP based on protocol-specific knowledge: 948 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 949 * sosendallatonce() returns true, 950 * the "atomic" variable is true, 951 * and sosendudp() blocks until space is available for the entire send. 952 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 953 * PR_IMPLOPCL flags set. 954 * UDP has no out-of-band data. 955 * UDP has no control data. 956 * UDP does not support MSG_EOR. 957 */ 958 int 959 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 960 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 961 { 962 size_t resid; 963 int error, pru_flags = 0; 964 int space; 965 966 if (td->td_lwp != NULL) 967 td->td_lwp->lwp_ru.ru_msgsnd++; 968 if (control) 969 m_freem(control); 970 971 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 972 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len; 973 974 restart: 975 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 976 if (error) 977 goto out; 978 979 if (so->so_state & SS_CANTSENDMORE) 980 gotoerr(EPIPE); 981 if (so->so_error) { 982 error = so->so_error; 983 so->so_error = 0; 984 goto release; 985 } 986 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 987 gotoerr(EDESTADDRREQ); 988 if (resid > so->so_snd.ssb_hiwat) 989 gotoerr(EMSGSIZE); 990 space = ssb_space(&so->so_snd); 991 if (uio && (space < 0 || (size_t)space < resid)) { 992 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 993 gotoerr(EWOULDBLOCK); 994 ssb_unlock(&so->so_snd); 995 error = ssb_wait(&so->so_snd); 996 if (error) 997 goto out; 998 goto restart; 999 } 1000 1001 if (uio) { 1002 int hdrlen = max_hdr; 1003 1004 /* 1005 * We try to optimize out the additional mbuf 1006 * allocations in M_PREPEND() on output path, e.g. 1007 * - udp_output(), when it tries to prepend protocol 1008 * headers. 1009 * - Link layer output function, when it tries to 1010 * prepend link layer header. 1011 * 1012 * This probably will not benefit any data that will 1013 * be fragmented, so this optimization is only performed 1014 * when the size of data and max size of protocol+link 1015 * headers fit into one mbuf cluster. 
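	 *
	 * (Concretely: a datagram of up to MCLBYTES - max_hdr bytes is
	 * copied into a single cluster whose m_data is advanced by max_hdr,
	 * so udp_output() and the link layer can later prepend their
	 * headers into that headroom without another allocation.)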
1016 */ 1017 if (uio->uio_resid > MCLBYTES - hdrlen || 1018 !udp_sosend_prepend) { 1019 top = m_uiomove(uio); 1020 if (top == NULL) 1021 goto release; 1022 } else { 1023 int nsize; 1024 1025 top = m_getl(uio->uio_resid + hdrlen, M_WAITOK, 1026 MT_DATA, M_PKTHDR, &nsize); 1027 KASSERT(nsize >= uio->uio_resid + hdrlen, 1028 ("sosendudp invalid nsize %d, " 1029 "resid %zu, hdrlen %d", 1030 nsize, uio->uio_resid, hdrlen)); 1031 1032 top->m_len = uio->uio_resid; 1033 top->m_pkthdr.len = uio->uio_resid; 1034 top->m_data += hdrlen; 1035 1036 error = uiomove(mtod(top, caddr_t), top->m_len, uio); 1037 if (error) 1038 goto out; 1039 } 1040 } 1041 1042 if (flags & MSG_DONTROUTE) 1043 pru_flags |= PRUS_DONTROUTE; 1044 1045 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 1046 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 1047 error = 0; 1048 } else { 1049 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 1050 } 1051 top = NULL; /* sent or freed in lower layer */ 1052 1053 release: 1054 ssb_unlock(&so->so_snd); 1055 out: 1056 if (top) 1057 m_freem(top); 1058 return (error); 1059 } 1060 1061 int 1062 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 1063 struct mbuf *top, struct mbuf *control, int flags, 1064 struct thread *td) 1065 { 1066 struct mbuf **mp; 1067 struct mbuf *m; 1068 size_t resid; 1069 int space, len; 1070 int error, mlen; 1071 int allatonce; 1072 int pru_flags; 1073 1074 if (uio) { 1075 KKASSERT(top == NULL); 1076 allatonce = 0; 1077 resid = uio->uio_resid; 1078 } else { 1079 allatonce = 1; 1080 resid = (size_t)top->m_pkthdr.len; 1081 #ifdef INVARIANTS 1082 len = 0; 1083 for (m = top; m; m = m->m_next) 1084 len += m->m_len; 1085 KKASSERT(top->m_pkthdr.len == len); 1086 #endif 1087 } 1088 1089 /* 1090 * WARNING! resid is unsigned, space and len are signed. space 1091 * can wind up negative if the sockbuf is overcommitted. 1092 * 1093 * Also check to make sure that MSG_EOR isn't used on TCP 1094 */ 1095 if (flags & MSG_EOR) { 1096 error = EINVAL; 1097 goto out; 1098 } 1099 1100 if (control) { 1101 /* TCP doesn't do control messages (rights, creds, etc) */ 1102 if (control->m_len) { 1103 error = EINVAL; 1104 goto out; 1105 } 1106 m_freem(control); /* empty control, just free it */ 1107 control = NULL; 1108 } 1109 1110 if (td->td_lwp != NULL) 1111 td->td_lwp->lwp_ru.ru_msgsnd++; 1112 1113 #define gotoerr(errcode) { error = errcode; goto release; } 1114 1115 restart: 1116 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1117 if (error) 1118 goto out; 1119 1120 do { 1121 if (so->so_state & SS_CANTSENDMORE) 1122 gotoerr(EPIPE); 1123 if (so->so_error) { 1124 error = so->so_error; 1125 so->so_error = 0; 1126 goto release; 1127 } 1128 if ((so->so_state & SS_ISCONNECTED) == 0 && 1129 (so->so_state & SS_ISCONFIRMING) == 0) 1130 gotoerr(ENOTCONN); 1131 if (allatonce && resid > so->so_snd.ssb_hiwat) 1132 gotoerr(EMSGSIZE); 1133 1134 space = ssb_space_prealloc(&so->so_snd); 1135 if (flags & MSG_OOB) 1136 space += 1024; 1137 if ((space < 0 || (size_t)space < resid) && !allatonce && 1138 space < so->so_snd.ssb_lowat) { 1139 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1140 gotoerr(EWOULDBLOCK); 1141 ssb_unlock(&so->so_snd); 1142 error = ssb_wait(&so->so_snd); 1143 if (error) 1144 goto out; 1145 goto restart; 1146 } 1147 mp = ⊤ 1148 do { 1149 int cnt = 0, async = 0; 1150 1151 if (uio == NULL) { 1152 /* 1153 * Data is prepackaged in "top". 
1154 */ 1155 resid = 0; 1156 } else do { 1157 if (resid > INT_MAX) 1158 resid = INT_MAX; 1159 if (tcp_sosend_jcluster) { 1160 m = m_getlj((int)resid, M_WAITOK, MT_DATA, 1161 top == NULL ? M_PKTHDR : 0, &mlen); 1162 } else { 1163 m = m_getl((int)resid, M_WAITOK, MT_DATA, 1164 top == NULL ? M_PKTHDR : 0, &mlen); 1165 } 1166 if (top == NULL) { 1167 m->m_pkthdr.len = 0; 1168 m->m_pkthdr.rcvif = NULL; 1169 } 1170 len = imin((int)szmin(mlen, resid), space); 1171 space -= len; 1172 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1173 resid = uio->uio_resid; 1174 m->m_len = len; 1175 *mp = m; 1176 top->m_pkthdr.len += len; 1177 if (error) 1178 goto release; 1179 mp = &m->m_next; 1180 if (resid == 0) 1181 break; 1182 ++cnt; 1183 } while (space > 0 && cnt < tcp_sosend_agglim); 1184 1185 if (tcp_sosend_async) 1186 async = 1; 1187 1188 if (flags & MSG_OOB) { 1189 pru_flags = PRUS_OOB; 1190 async = 0; 1191 } else if ((flags & MSG_EOF) && resid == 0) { 1192 pru_flags = PRUS_EOF; 1193 } else if (resid > 0 && space > 0) { 1194 /* If there is more to send, set PRUS_MORETOCOME */ 1195 pru_flags = PRUS_MORETOCOME; 1196 async = 1; 1197 } else { 1198 pru_flags = 0; 1199 } 1200 1201 if (flags & MSG_SYNC) 1202 async = 0; 1203 1204 /* 1205 * XXX all the SS_CANTSENDMORE checks previously 1206 * done could be out of date. We could have recieved 1207 * a reset packet in an interrupt or maybe we slept 1208 * while doing page faults in uiomove() etc. We could 1209 * probably recheck again inside the splnet() protection 1210 * here, but there are probably other places that this 1211 * also happens. We must rethink this. 1212 */ 1213 for (m = top; m; m = m->m_next) 1214 ssb_preallocstream(&so->so_snd, m); 1215 if (!async) { 1216 error = so_pru_send(so, pru_flags, top, 1217 NULL, NULL, td); 1218 } else { 1219 so_pru_send_async(so, pru_flags, top, 1220 NULL, NULL, td); 1221 error = 0; 1222 } 1223 1224 top = NULL; 1225 mp = ⊤ 1226 if (error) 1227 goto release; 1228 } while (resid && space > 0); 1229 } while (resid); 1230 1231 release: 1232 ssb_unlock(&so->so_snd); 1233 out: 1234 if (top) 1235 m_freem(top); 1236 if (control) 1237 m_freem(control); 1238 return (error); 1239 } 1240 #endif 1241 1242 /* 1243 * Implement receive operations on a socket. 1244 * 1245 * We depend on the way that records are added to the signalsockbuf 1246 * by sbappend*. In particular, each record (mbufs linked through m_next) 1247 * must begin with an address if the protocol so specifies, 1248 * followed by an optional mbuf or mbufs containing ancillary data, 1249 * and then zero or more mbufs of data. 1250 * 1251 * Although the signalsockbuf is locked, new data may still be appended. 1252 * A token inside the ssb_lock deals with MP issues and still allows 1253 * the network to access the socket if we block in a uio. 1254 * 1255 * The caller may receive the data as a single mbuf chain by supplying 1256 * an mbuf **mp0 for use in returning the chain. The uio is then used 1257 * only for the count in uio_resid. 
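 *
 * A single record in the receive buffer therefore looks roughly like:
 *
 *	MT_SONAME -> MT_CONTROL ... -> MT_DATA -> MT_DATA ...
 *	(address)    (ancillary data)   (payload, zero or more mbufs)
 *
 * with the address and control mbufs present only when the protocol
 * supplies them.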
1258 */ 1259 int 1260 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1261 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1262 { 1263 struct mbuf *m, *n; 1264 struct mbuf *free_chain = NULL; 1265 int flags, len, error, offset; 1266 struct protosw *pr = so->so_proto; 1267 int moff, type = 0; 1268 size_t resid, orig_resid; 1269 boolean_t free_rights = FALSE; 1270 1271 if (uio) 1272 resid = uio->uio_resid; 1273 else 1274 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1275 orig_resid = resid; 1276 1277 if (psa) 1278 *psa = NULL; 1279 if (controlp) 1280 *controlp = NULL; 1281 if (flagsp) 1282 flags = *flagsp &~ MSG_EOR; 1283 else 1284 flags = 0; 1285 if (flags & MSG_OOB) { 1286 m = m_get(M_WAITOK, MT_DATA); 1287 if (m == NULL) 1288 return (ENOBUFS); 1289 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1290 if (error) 1291 goto bad; 1292 if (sio) { 1293 do { 1294 sbappend(sio, m); 1295 KKASSERT(resid >= (size_t)m->m_len); 1296 resid -= (size_t)m->m_len; 1297 } while (resid > 0 && m); 1298 } else { 1299 do { 1300 uio->uio_resid = resid; 1301 error = uiomove(mtod(m, caddr_t), 1302 (int)szmin(resid, m->m_len), 1303 uio); 1304 resid = uio->uio_resid; 1305 m = m_free(m); 1306 } while (uio->uio_resid && error == 0 && m); 1307 } 1308 bad: 1309 if (m) 1310 m_freem(m); 1311 return (error); 1312 } 1313 if ((so->so_state & SS_ISCONFIRMING) && resid) 1314 so_pru_rcvd(so, 0); 1315 1316 /* 1317 * The token interlocks against the protocol thread while 1318 * ssb_lock is a blocking lock against other userland entities. 1319 */ 1320 lwkt_gettoken(&so->so_rcv.ssb_token); 1321 restart: 1322 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1323 if (error) 1324 goto done; 1325 1326 m = so->so_rcv.ssb_mb; 1327 /* 1328 * If we have less data than requested, block awaiting more 1329 * (subject to any timeout) if: 1330 * 1. the current count is less than the low water mark, or 1331 * 2. MSG_WAITALL is set, and it is possible to do the entire 1332 * receive operation at once if we block (resid <= hiwat). 1333 * 3. MSG_DONTWAIT is not set 1334 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1335 * we have to do the receive in sections, and thus risk returning 1336 * a short count if a timeout or signal occurs after we start. 
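	 *
	 * (Illustrative numbers: with ssb_lowat == 1, 100 bytes buffered
	 * and a 4096 byte request, a plain read returns the 100 bytes
	 * immediately as a short count, while the same read with
	 * MSG_WAITALL blocks until 4096 bytes have accumulated, the
	 * connection is closed, or an error/signal ends the wait.)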
1337 */ 1338 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1339 (size_t)so->so_rcv.ssb_cc < resid) && 1340 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1341 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1342 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1343 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1344 if (so->so_error) { 1345 if (m) 1346 goto dontblock; 1347 error = so->so_error; 1348 if ((flags & MSG_PEEK) == 0) 1349 so->so_error = 0; 1350 goto release; 1351 } 1352 if (so->so_state & SS_CANTRCVMORE) { 1353 if (m) 1354 goto dontblock; 1355 else 1356 goto release; 1357 } 1358 for (; m; m = m->m_next) { 1359 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1360 m = so->so_rcv.ssb_mb; 1361 goto dontblock; 1362 } 1363 } 1364 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1365 (pr->pr_flags & PR_CONNREQUIRED)) { 1366 error = ENOTCONN; 1367 goto release; 1368 } 1369 if (resid == 0) 1370 goto release; 1371 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1372 error = EWOULDBLOCK; 1373 goto release; 1374 } 1375 ssb_unlock(&so->so_rcv); 1376 error = ssb_wait(&so->so_rcv); 1377 if (error) 1378 goto done; 1379 goto restart; 1380 } 1381 dontblock: 1382 if (uio && uio->uio_td && uio->uio_td->td_proc) 1383 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1384 1385 /* 1386 * note: m should be == sb_mb here. Cache the next record while 1387 * cleaning up. Note that calling m_free*() will break out critical 1388 * section. 1389 */ 1390 KKASSERT(m == so->so_rcv.ssb_mb); 1391 1392 /* 1393 * Skip any address mbufs prepending the record. 1394 */ 1395 if (pr->pr_flags & PR_ADDR) { 1396 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1397 orig_resid = 0; 1398 if (psa) 1399 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1400 if (flags & MSG_PEEK) 1401 m = m->m_next; 1402 else 1403 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1404 } 1405 1406 /* 1407 * Skip any control mbufs prepending the record. 1408 */ 1409 while (m && m->m_type == MT_CONTROL && error == 0) { 1410 if (flags & MSG_PEEK) { 1411 if (controlp) 1412 *controlp = m_copy(m, 0, m->m_len); 1413 m = m->m_next; /* XXX race */ 1414 } else { 1415 const struct cmsghdr *cm = mtod(m, struct cmsghdr *); 1416 1417 if (controlp) { 1418 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1419 if (pr->pr_domain->dom_externalize && 1420 cm->cmsg_level == SOL_SOCKET && 1421 cm->cmsg_type == SCM_RIGHTS) { 1422 error = pr->pr_domain->dom_externalize 1423 (m, flags); 1424 } 1425 *controlp = m; 1426 m = n; 1427 } else { 1428 if (cm->cmsg_level == SOL_SOCKET && 1429 cm->cmsg_type == SCM_RIGHTS) 1430 free_rights = TRUE; 1431 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1432 } 1433 } 1434 if (controlp && *controlp) { 1435 orig_resid = 0; 1436 controlp = &(*controlp)->m_next; 1437 } 1438 } 1439 1440 /* 1441 * flag OOB data. 1442 */ 1443 if (m) { 1444 type = m->m_type; 1445 if (type == MT_OOBDATA) 1446 flags |= MSG_OOB; 1447 } 1448 1449 /* 1450 * Copy to the UIO or mbuf return chain (*mp). 1451 */ 1452 moff = 0; 1453 offset = 0; 1454 while (m && resid > 0 && error == 0) { 1455 if (m->m_type == MT_OOBDATA) { 1456 if (type != MT_OOBDATA) 1457 break; 1458 } else if (type == MT_OOBDATA) 1459 break; 1460 else 1461 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1462 ("receive 3")); 1463 soclrstate(so, SS_RCVATMARK); 1464 len = (resid > INT_MAX) ? 
INT_MAX : resid; 1465 if (so->so_oobmark && len > so->so_oobmark - offset) 1466 len = so->so_oobmark - offset; 1467 if (len > m->m_len - moff) 1468 len = m->m_len - moff; 1469 1470 /* 1471 * Copy out to the UIO or pass the mbufs back to the SIO. 1472 * The SIO is dealt with when we eat the mbuf, but deal 1473 * with the resid here either way. 1474 */ 1475 if (uio) { 1476 uio->uio_resid = resid; 1477 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1478 resid = uio->uio_resid; 1479 if (error) 1480 goto release; 1481 } else { 1482 resid -= (size_t)len; 1483 } 1484 1485 /* 1486 * Eat the entire mbuf or just a piece of it 1487 */ 1488 if (len == m->m_len - moff) { 1489 if (m->m_flags & M_EOR) 1490 flags |= MSG_EOR; 1491 if (flags & MSG_PEEK) { 1492 m = m->m_next; 1493 moff = 0; 1494 } else { 1495 if (sio) { 1496 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1497 sbappend(sio, m); 1498 m = n; 1499 } else { 1500 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1501 } 1502 } 1503 } else { 1504 if (flags & MSG_PEEK) { 1505 moff += len; 1506 } else { 1507 if (sio) { 1508 n = m_copym(m, 0, len, M_WAITOK); 1509 if (n) 1510 sbappend(sio, n); 1511 } 1512 m->m_data += len; 1513 m->m_len -= len; 1514 so->so_rcv.ssb_cc -= len; 1515 } 1516 } 1517 if (so->so_oobmark) { 1518 if ((flags & MSG_PEEK) == 0) { 1519 so->so_oobmark -= len; 1520 if (so->so_oobmark == 0) { 1521 sosetstate(so, SS_RCVATMARK); 1522 break; 1523 } 1524 } else { 1525 offset += len; 1526 if (offset == so->so_oobmark) 1527 break; 1528 } 1529 } 1530 if (flags & MSG_EOR) 1531 break; 1532 /* 1533 * If the MSG_WAITALL flag is set (for non-atomic socket), 1534 * we must not quit until resid == 0 or an error 1535 * termination. If a signal/timeout occurs, return 1536 * with a short count but without error. 1537 * Keep signalsockbuf locked against other readers. 1538 */ 1539 while ((flags & MSG_WAITALL) && m == NULL && 1540 resid > 0 && !sosendallatonce(so) && 1541 so->so_rcv.ssb_mb == NULL) { 1542 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1543 break; 1544 /* 1545 * The window might have closed to zero, make 1546 * sure we send an ack now that we've drained 1547 * the buffer or we might end up blocking until 1548 * the idle takes over (5 seconds). 1549 */ 1550 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1551 so_pru_rcvd(so, flags); 1552 error = ssb_wait(&so->so_rcv); 1553 if (error) { 1554 ssb_unlock(&so->so_rcv); 1555 error = 0; 1556 goto done; 1557 } 1558 m = so->so_rcv.ssb_mb; 1559 } 1560 } 1561 1562 /* 1563 * If an atomic read was requested but unread data still remains 1564 * in the record, set MSG_TRUNC. 1565 */ 1566 if (m && pr->pr_flags & PR_ATOMIC) 1567 flags |= MSG_TRUNC; 1568 1569 /* 1570 * Cleanup. If an atomic read was requested drop any unread data. 
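	 *
	 * (For a PR_ATOMIC protocol such as UDP this means, for example,
	 * that reading a 1500 byte datagram into a 512 byte buffer returns
	 * 512 bytes, sets MSG_TRUNC above, and discards the remaining 988
	 * bytes of the record here instead of leaving them for a later
	 * read.)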
1571 */ 1572 if ((flags & MSG_PEEK) == 0) { 1573 if (m && (pr->pr_flags & PR_ATOMIC)) 1574 sbdroprecord(&so->so_rcv.sb); 1575 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1576 so_pru_rcvd(so, flags); 1577 } 1578 1579 if (orig_resid == resid && orig_resid && 1580 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1581 ssb_unlock(&so->so_rcv); 1582 goto restart; 1583 } 1584 1585 if (flagsp) 1586 *flagsp |= flags; 1587 release: 1588 ssb_unlock(&so->so_rcv); 1589 done: 1590 lwkt_reltoken(&so->so_rcv.ssb_token); 1591 if (free_chain) { 1592 if (free_rights && (pr->pr_flags & PR_RIGHTS) && 1593 pr->pr_domain->dom_dispose) 1594 pr->pr_domain->dom_dispose(free_chain); 1595 m_freem(free_chain); 1596 } 1597 return (error); 1598 } 1599 1600 int 1601 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio, 1602 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1603 { 1604 struct mbuf *m, *n; 1605 struct mbuf *free_chain = NULL; 1606 int flags, len, error, offset; 1607 struct protosw *pr = so->so_proto; 1608 int moff; 1609 int didoob; 1610 size_t resid, orig_resid, restmp; 1611 1612 if (uio) 1613 resid = uio->uio_resid; 1614 else 1615 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1616 orig_resid = resid; 1617 1618 if (psa) 1619 *psa = NULL; 1620 if (controlp) 1621 *controlp = NULL; 1622 if (flagsp) 1623 flags = *flagsp &~ MSG_EOR; 1624 else 1625 flags = 0; 1626 if (flags & MSG_OOB) { 1627 m = m_get(M_WAITOK, MT_DATA); 1628 if (m == NULL) 1629 return (ENOBUFS); 1630 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1631 if (error) 1632 goto bad; 1633 if (sio) { 1634 do { 1635 sbappend(sio, m); 1636 KKASSERT(resid >= (size_t)m->m_len); 1637 resid -= (size_t)m->m_len; 1638 } while (resid > 0 && m); 1639 } else { 1640 do { 1641 uio->uio_resid = resid; 1642 error = uiomove(mtod(m, caddr_t), 1643 (int)szmin(resid, m->m_len), 1644 uio); 1645 resid = uio->uio_resid; 1646 m = m_free(m); 1647 } while (uio->uio_resid && error == 0 && m); 1648 } 1649 bad: 1650 if (m) 1651 m_freem(m); 1652 return (error); 1653 } 1654 1655 /* 1656 * The token interlocks against the protocol thread while 1657 * ssb_lock is a blocking lock against other userland entities. 1658 * 1659 * Lock a limited number of mbufs (not all, so sbcompress() still 1660 * works well). The token is used as an interlock for sbwait() so 1661 * release it afterwords. 1662 */ 1663 restart: 1664 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1665 if (error) 1666 goto done; 1667 1668 lwkt_gettoken(&so->so_rcv.ssb_token); 1669 m = so->so_rcv.ssb_mb; 1670 1671 /* 1672 * If we have less data than requested, block awaiting more 1673 * (subject to any timeout) if: 1674 * 1. the current count is less than the low water mark, or 1675 * 2. MSG_WAITALL is set, and it is possible to do the entire 1676 * receive operation at once if we block (resid <= hiwat). 1677 * 3. MSG_DONTWAIT is not set 1678 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1679 * we have to do the receive in sections, and thus risk returning 1680 * a short count if a timeout or signal occurs after we start. 
1681 */ 1682 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1683 (size_t)so->so_rcv.ssb_cc < resid) && 1684 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1685 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1686 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1687 if (so->so_error) { 1688 if (m) 1689 goto dontblock; 1690 lwkt_reltoken(&so->so_rcv.ssb_token); 1691 error = so->so_error; 1692 if ((flags & MSG_PEEK) == 0) 1693 so->so_error = 0; 1694 goto release; 1695 } 1696 if (so->so_state & SS_CANTRCVMORE) { 1697 if (m) 1698 goto dontblock; 1699 lwkt_reltoken(&so->so_rcv.ssb_token); 1700 goto release; 1701 } 1702 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1703 (pr->pr_flags & PR_CONNREQUIRED)) { 1704 lwkt_reltoken(&so->so_rcv.ssb_token); 1705 error = ENOTCONN; 1706 goto release; 1707 } 1708 if (resid == 0) { 1709 lwkt_reltoken(&so->so_rcv.ssb_token); 1710 goto release; 1711 } 1712 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1713 lwkt_reltoken(&so->so_rcv.ssb_token); 1714 error = EWOULDBLOCK; 1715 goto release; 1716 } 1717 ssb_unlock(&so->so_rcv); 1718 error = ssb_wait(&so->so_rcv); 1719 lwkt_reltoken(&so->so_rcv.ssb_token); 1720 if (error) 1721 goto done; 1722 goto restart; 1723 } 1724 1725 /* 1726 * Token still held 1727 */ 1728 dontblock: 1729 n = m; 1730 restmp = 0; 1731 while (n && restmp < resid) { 1732 n->m_flags |= M_SOLOCKED; 1733 restmp += n->m_len; 1734 if (n->m_next == NULL) 1735 n = n->m_nextpkt; 1736 else 1737 n = n->m_next; 1738 } 1739 1740 /* 1741 * Release token for loop 1742 */ 1743 lwkt_reltoken(&so->so_rcv.ssb_token); 1744 if (uio && uio->uio_td && uio->uio_td->td_proc) 1745 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1746 1747 /* 1748 * note: m should be == sb_mb here. Cache the next record while 1749 * cleaning up. Note that calling m_free*() will break out critical 1750 * section. 1751 */ 1752 KKASSERT(m == so->so_rcv.ssb_mb); 1753 1754 /* 1755 * Copy to the UIO or mbuf return chain (*mp). 1756 * 1757 * NOTE: Token is not held for loop 1758 */ 1759 moff = 0; 1760 offset = 0; 1761 didoob = 0; 1762 1763 while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) { 1764 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1765 ("receive 3")); 1766 1767 soclrstate(so, SS_RCVATMARK); 1768 len = (resid > INT_MAX) ? INT_MAX : resid; 1769 if (so->so_oobmark && len > so->so_oobmark - offset) 1770 len = so->so_oobmark - offset; 1771 if (len > m->m_len - moff) 1772 len = m->m_len - moff; 1773 1774 /* 1775 * Copy out to the UIO or pass the mbufs back to the SIO. 1776 * The SIO is dealt with when we eat the mbuf, but deal 1777 * with the resid here either way. 1778 */ 1779 if (uio) { 1780 uio->uio_resid = resid; 1781 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1782 resid = uio->uio_resid; 1783 if (error) 1784 goto release; 1785 } else { 1786 resid -= (size_t)len; 1787 } 1788 1789 /* 1790 * Eat the entire mbuf or just a piece of it 1791 */ 1792 offset += len; 1793 if (len == m->m_len - moff) { 1794 m = m->m_next; 1795 moff = 0; 1796 } else { 1797 moff += len; 1798 } 1799 1800 /* 1801 * Check oobmark 1802 */ 1803 if (so->so_oobmark && offset == so->so_oobmark) { 1804 didoob = 1; 1805 break; 1806 } 1807 } 1808 1809 /* 1810 * Synchronize sockbuf with data we read. 1811 * 1812 * NOTE: (m) is junk on entry (it could be left over from the 1813 * previous loop). 
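	 *
	 * (Recap of the scheme: the copy loop above ran without the token
	 * and only touched mbufs that had been marked M_SOLOCKED while the
	 * token was held; below we re-take the token and unlink or trim
	 * mbufs to match what was actually consumed.)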
1814 */ 1815 if ((flags & MSG_PEEK) == 0) { 1816 lwkt_gettoken(&so->so_rcv.ssb_token); 1817 m = so->so_rcv.ssb_mb; 1818 while (m && offset >= m->m_len) { 1819 if (so->so_oobmark) { 1820 so->so_oobmark -= m->m_len; 1821 if (so->so_oobmark == 0) { 1822 sosetstate(so, SS_RCVATMARK); 1823 didoob = 1; 1824 } 1825 } 1826 offset -= m->m_len; 1827 if (sio) { 1828 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1829 sbappend(sio, m); 1830 m = n; 1831 } else { 1832 m = sbunlinkmbuf(&so->so_rcv.sb, 1833 m, &free_chain); 1834 } 1835 } 1836 if (offset) { 1837 KKASSERT(m); 1838 if (sio) { 1839 n = m_copym(m, 0, offset, M_WAITOK); 1840 if (n) 1841 sbappend(sio, n); 1842 } 1843 m->m_data += offset; 1844 m->m_len -= offset; 1845 so->so_rcv.ssb_cc -= offset; 1846 if (so->so_oobmark) { 1847 so->so_oobmark -= offset; 1848 if (so->so_oobmark == 0) { 1849 sosetstate(so, SS_RCVATMARK); 1850 didoob = 1; 1851 } 1852 } 1853 offset = 0; 1854 } 1855 lwkt_reltoken(&so->so_rcv.ssb_token); 1856 } 1857 1858 /* 1859 * If the MSG_WAITALL flag is set (for non-atomic socket), 1860 * we must not quit until resid == 0 or an error termination. 1861 * 1862 * If a signal/timeout occurs, return with a short count but without 1863 * error. 1864 * 1865 * Keep signalsockbuf locked against other readers. 1866 * 1867 * XXX if MSG_PEEK we currently do quit. 1868 */ 1869 if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) && 1870 didoob == 0 && resid > 0 && 1871 !sosendallatonce(so)) { 1872 lwkt_gettoken(&so->so_rcv.ssb_token); 1873 error = 0; 1874 while ((m = so->so_rcv.ssb_mb) == NULL) { 1875 if (so->so_error || (so->so_state & SS_CANTRCVMORE)) { 1876 error = so->so_error; 1877 break; 1878 } 1879 /* 1880 * The window might have closed to zero, make 1881 * sure we send an ack now that we've drained 1882 * the buffer or we might end up blocking until 1883 * the idle takes over (5 seconds). 1884 */ 1885 if (so->so_pcb) 1886 so_pru_rcvd_async(so); 1887 if (so->so_rcv.ssb_mb == NULL) 1888 error = ssb_wait(&so->so_rcv); 1889 if (error) { 1890 lwkt_reltoken(&so->so_rcv.ssb_token); 1891 ssb_unlock(&so->so_rcv); 1892 error = 0; 1893 goto done; 1894 } 1895 } 1896 if (m && error == 0) 1897 goto dontblock; 1898 lwkt_reltoken(&so->so_rcv.ssb_token); 1899 } 1900 1901 /* 1902 * Token not held here. 1903 * 1904 * Cleanup. If an atomic read was requested drop any unread data XXX 1905 */ 1906 if ((flags & MSG_PEEK) == 0) { 1907 if (so->so_pcb) 1908 so_pru_rcvd_async(so); 1909 } 1910 1911 if (orig_resid == resid && orig_resid && 1912 (so->so_state & SS_CANTRCVMORE) == 0) { 1913 ssb_unlock(&so->so_rcv); 1914 goto restart; 1915 } 1916 1917 if (flagsp) 1918 *flagsp |= flags; 1919 release: 1920 ssb_unlock(&so->so_rcv); 1921 done: 1922 if (free_chain) 1923 m_freem(free_chain); 1924 return (error); 1925 } 1926 1927 /* 1928 * Shut a socket down. Note that we do not get a frontend lock as we 1929 * want to be able to shut the socket down even if another thread is 1930 * blocked in a read(), thus waking it up. 
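 *
 * For illustration, the three cases of "how" map onto the code below as:
 *
 *	soshutdown(so, SHUT_RD)		flush pending receive data only
 *	soshutdown(so, SHUT_WR)		shut down the send side via
 *					so_pru_shutdown() (a FIN for TCP)
 *	soshutdown(so, SHUT_RDWR)	both of the above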
1931 */ 1932 int 1933 soshutdown(struct socket *so, int how) 1934 { 1935 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1936 return (EINVAL); 1937 1938 if (how != SHUT_WR) { 1939 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1940 sorflush(so); 1941 /*ssb_unlock(&so->so_rcv);*/ 1942 } 1943 if (how != SHUT_RD) 1944 return (so_pru_shutdown(so)); 1945 return (0); 1946 } 1947 1948 void 1949 sorflush(struct socket *so) 1950 { 1951 struct signalsockbuf *ssb = &so->so_rcv; 1952 struct protosw *pr = so->so_proto; 1953 struct signalsockbuf asb; 1954 1955 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1956 1957 lwkt_gettoken(&ssb->ssb_token); 1958 socantrcvmore(so); 1959 asb = *ssb; 1960 1961 /* 1962 * Can't just blow up the ssb structure here 1963 */ 1964 bzero(&ssb->sb, sizeof(ssb->sb)); 1965 ssb->ssb_timeo = 0; 1966 ssb->ssb_lowat = 0; 1967 ssb->ssb_hiwat = 0; 1968 ssb->ssb_mbmax = 0; 1969 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1970 1971 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1972 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1973 ssb_release(&asb, so); 1974 1975 lwkt_reltoken(&ssb->ssb_token); 1976 } 1977 1978 #ifdef INET 1979 static int 1980 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1981 { 1982 struct accept_filter_arg *afap = NULL; 1983 struct accept_filter *afp; 1984 struct so_accf *af = so->so_accf; 1985 int error = 0; 1986 1987 /* do not set/remove accept filters on non listen sockets */ 1988 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1989 error = EINVAL; 1990 goto out; 1991 } 1992 1993 /* removing the filter */ 1994 if (sopt == NULL) { 1995 if (af != NULL) { 1996 if (af->so_accept_filter != NULL && 1997 af->so_accept_filter->accf_destroy != NULL) { 1998 af->so_accept_filter->accf_destroy(so); 1999 } 2000 if (af->so_accept_filter_str != NULL) { 2001 kfree(af->so_accept_filter_str, M_ACCF); 2002 } 2003 kfree(af, M_ACCF); 2004 so->so_accf = NULL; 2005 } 2006 so->so_options &= ~SO_ACCEPTFILTER; 2007 return (0); 2008 } 2009 /* adding a filter */ 2010 /* must remove previous filter first */ 2011 if (af != NULL) { 2012 error = EINVAL; 2013 goto out; 2014 } 2015 /* don't put large objects on the kernel stack */ 2016 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 2017 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 2018 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 2019 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 2020 if (error) 2021 goto out; 2022 afp = accept_filt_get(afap->af_name); 2023 if (afp == NULL) { 2024 error = ENOENT; 2025 goto out; 2026 } 2027 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 2028 if (afp->accf_create != NULL) { 2029 if (afap->af_name[0] != '\0') { 2030 int len = strlen(afap->af_name) + 1; 2031 2032 af->so_accept_filter_str = kmalloc(len, M_ACCF, 2033 M_WAITOK); 2034 strcpy(af->so_accept_filter_str, afap->af_name); 2035 } 2036 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 2037 if (af->so_accept_filter_arg == NULL) { 2038 kfree(af->so_accept_filter_str, M_ACCF); 2039 kfree(af, M_ACCF); 2040 so->so_accf = NULL; 2041 error = EINVAL; 2042 goto out; 2043 } 2044 } 2045 af->so_accept_filter = afp; 2046 so->so_accf = af; 2047 so->so_options |= SO_ACCEPTFILTER; 2048 out: 2049 if (afap != NULL) 2050 kfree(afap, M_TEMP); 2051 return (error); 2052 } 2053 #endif /* INET */ 2054 2055 /* 2056 * Perhaps this routine, and sooptcopyout(), below, ought to come in 2057 * an additional variant to handle the case where the option value needs 2058 * to be some kind of integer, but not a specific 

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
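
/*
 * Illustrative sketch (not compiled): the calling pattern a protocol-level
 * option handler typically uses with sooptcopyin()/soopt_to_kbuf() to fetch
 * a fixed-size integer option.  The helper name and the 'target' variable
 * are hypothetical; only the copyin/validation pattern is the point.
 */
#if 0
static int
example_set_intopt(struct sockopt *sopt, int *target)
{
	int optval, error;

	/* Copy in exactly one int; anything shorter is rejected with EINVAL. */
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
	if (optval < 0)
		return (EINVAL);
	*target = optval;
	return (0);
}
#endif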

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}
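
/*
 * Illustrative sketch (not compiled): setting a receive timeout from user
 * space.  sosetopt() above converts the timeval to scheduler ticks
 * (tv_sec * hz + tv_usec / ustick), rounds a non-zero sub-tick value up to
 * one tick, and rejects out-of-range values with EDOM.  The helper name is
 * hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int
set_recv_timeout(int s, int seconds)
{
	struct timeval tv;

	tv.tv_sec = seconds;
	tv.tv_usec = 0;
	return (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)));
}
#endif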
2277 */ 2278 valsize = szmin(len, sopt->sopt_valsize); 2279 sopt->sopt_valsize = valsize; 2280 if (sopt->sopt_val != 0) { 2281 bcopy(buf, sopt->sopt_val, valsize); 2282 } 2283 } 2284 2285 int 2286 sogetopt(struct socket *so, struct sockopt *sopt) 2287 { 2288 int error, optval; 2289 long optval_l; 2290 struct linger l; 2291 struct timeval tv; 2292 #ifdef INET 2293 struct accept_filter_arg *afap; 2294 #endif 2295 2296 error = 0; 2297 sopt->sopt_dir = SOPT_GET; 2298 if (sopt->sopt_level != SOL_SOCKET) { 2299 if (so->so_proto && so->so_proto->pr_ctloutput) { 2300 return (so_pr_ctloutput(so, sopt)); 2301 } else 2302 return (ENOPROTOOPT); 2303 } else { 2304 switch (sopt->sopt_name) { 2305 #ifdef INET 2306 case SO_ACCEPTFILTER: 2307 if ((so->so_options & SO_ACCEPTCONN) == 0) 2308 return (EINVAL); 2309 afap = kmalloc(sizeof(*afap), M_TEMP, 2310 M_WAITOK | M_ZERO); 2311 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 2312 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 2313 if (so->so_accf->so_accept_filter_str != NULL) 2314 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 2315 } 2316 error = sooptcopyout(sopt, afap, sizeof(*afap)); 2317 kfree(afap, M_TEMP); 2318 break; 2319 #endif /* INET */ 2320 2321 case SO_LINGER: 2322 l.l_onoff = so->so_options & SO_LINGER; 2323 l.l_linger = so->so_linger; 2324 error = sooptcopyout(sopt, &l, sizeof l); 2325 break; 2326 2327 case SO_USELOOPBACK: 2328 case SO_DONTROUTE: 2329 case SO_DEBUG: 2330 case SO_KEEPALIVE: 2331 case SO_REUSEADDR: 2332 case SO_REUSEPORT: 2333 case SO_BROADCAST: 2334 case SO_OOBINLINE: 2335 case SO_TIMESTAMP: 2336 case SO_NOSIGPIPE: 2337 optval = so->so_options & sopt->sopt_name; 2338 integer: 2339 error = sooptcopyout(sopt, &optval, sizeof optval); 2340 break; 2341 2342 case SO_TYPE: 2343 optval = so->so_type; 2344 goto integer; 2345 2346 case SO_ERROR: 2347 optval = so->so_error; 2348 so->so_error = 0; 2349 goto integer; 2350 2351 case SO_SNDBUF: 2352 optval = so->so_snd.ssb_hiwat; 2353 goto integer; 2354 2355 case SO_RCVBUF: 2356 optval = so->so_rcv.ssb_hiwat; 2357 goto integer; 2358 2359 case SO_SNDLOWAT: 2360 optval = so->so_snd.ssb_lowat; 2361 goto integer; 2362 2363 case SO_RCVLOWAT: 2364 optval = so->so_rcv.ssb_lowat; 2365 goto integer; 2366 2367 case SO_SNDTIMEO: 2368 case SO_RCVTIMEO: 2369 optval = (sopt->sopt_name == SO_SNDTIMEO ? 2370 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo); 2371 2372 tv.tv_sec = optval / hz; 2373 tv.tv_usec = (optval % hz) * ustick; 2374 error = sooptcopyout(sopt, &tv, sizeof tv); 2375 break; 2376 2377 case SO_SNDSPACE: 2378 optval_l = ssb_space(&so->so_snd); 2379 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l)); 2380 break; 2381 2382 case SO_CPUHINT: 2383 optval = -1; /* no hint */ 2384 goto integer; 2385 2386 default: 2387 error = ENOPROTOOPT; 2388 break; 2389 } 2390 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) 2391 so_pr_ctloutput(so, sopt); 2392 return (error); 2393 } 2394 } 2395 2396 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2397 int 2398 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2399 { 2400 struct mbuf *m, *m_prev; 2401 int sopt_size = sopt->sopt_valsize, msize; 2402 2403 m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA, 2404 0, &msize); 2405 if (m == NULL) 2406 return (ENOBUFS); 2407 m->m_len = min(msize, sopt_size); 2408 sopt_size -= m->m_len; 2409 *mp = m; 2410 m_prev = m; 2411 2412 while (sopt_size > 0) { 2413 m = m_getl(sopt_size, sopt->sopt_td ? 

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be supplied from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
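
/*
 * Illustrative sketch (not compiled): how the mbuf-based compatibility shims
 * above are typically chained by option code that still passes values around
 * as mbuf chains (the IPv6 setsockopt path is the in-tree user of these
 * routines).  The handler name is hypothetical and error handling is
 * deliberately minimal.
 */
#if 0
static int
example_mbuf_ctloutput(struct sockopt *sopt)
{
	struct mbuf *m;
	int error;

	error = soopt_getm(sopt, &m);	/* size the chain from sopt_valsize */
	if (error)
		return (error);
	error = soopt_mcopyin(sopt, m);	/* copy option data into the chain */
	if (error)
		return (error);
	/* ... parse the option data out of 'm' ... */
	m_freem(m);
	return (0);
}
#endif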
2505 */ 2506 KNOTE(&so->so_rcv.ssb_kq.ki_note, 0); 2507 } 2508 2509 int 2510 sokqfilter(struct file *fp, struct knote *kn) 2511 { 2512 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2513 struct signalsockbuf *ssb; 2514 2515 switch (kn->kn_filter) { 2516 case EVFILT_READ: 2517 if (so->so_options & SO_ACCEPTCONN) 2518 kn->kn_fop = &solisten_filtops; 2519 else 2520 kn->kn_fop = &soread_filtops; 2521 ssb = &so->so_rcv; 2522 break; 2523 case EVFILT_WRITE: 2524 kn->kn_fop = &sowrite_filtops; 2525 ssb = &so->so_snd; 2526 break; 2527 case EVFILT_EXCEPT: 2528 kn->kn_fop = &soexcept_filtops; 2529 ssb = &so->so_rcv; 2530 break; 2531 default: 2532 return (EOPNOTSUPP); 2533 } 2534 2535 knote_insert(&ssb->ssb_kq.ki_note, kn); 2536 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE); 2537 return (0); 2538 } 2539 2540 static void 2541 filt_sordetach(struct knote *kn) 2542 { 2543 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2544 2545 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn); 2546 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note)) 2547 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE); 2548 } 2549 2550 /*ARGSUSED*/ 2551 static int 2552 filt_soread(struct knote *kn, long hint __unused) 2553 { 2554 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2555 2556 if (kn->kn_sfflags & NOTE_OOB) { 2557 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) { 2558 kn->kn_fflags |= NOTE_OOB; 2559 return (1); 2560 } 2561 return (0); 2562 } 2563 kn->kn_data = so->so_rcv.ssb_cc; 2564 2565 if (so->so_state & SS_CANTRCVMORE) { 2566 /* 2567 * Only set NODATA if all data has been exhausted. 2568 */ 2569 if (kn->kn_data == 0) 2570 kn->kn_flags |= EV_NODATA; 2571 kn->kn_flags |= EV_EOF; 2572 kn->kn_fflags = so->so_error; 2573 return (1); 2574 } 2575 if (so->so_error) /* temporary udp error */ 2576 return (1); 2577 if (kn->kn_sfflags & NOTE_LOWAT) 2578 return (kn->kn_data >= kn->kn_sdata); 2579 return ((kn->kn_data >= so->so_rcv.ssb_lowat) || 2580 !TAILQ_EMPTY(&so->so_comp)); 2581 } 2582 2583 static void 2584 filt_sowdetach(struct knote *kn) 2585 { 2586 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2587 2588 knote_remove(&so->so_snd.ssb_kq.ki_note, kn); 2589 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note)) 2590 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE); 2591 } 2592 2593 /*ARGSUSED*/ 2594 static int 2595 filt_sowrite(struct knote *kn, long hint __unused) 2596 { 2597 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2598 2599 if (so->so_snd.ssb_flags & SSB_PREALLOC) 2600 kn->kn_data = ssb_space_prealloc(&so->so_snd); 2601 else 2602 kn->kn_data = ssb_space(&so->so_snd); 2603 2604 if (so->so_state & SS_CANTSENDMORE) { 2605 kn->kn_flags |= (EV_EOF | EV_NODATA); 2606 kn->kn_fflags = so->so_error; 2607 return (1); 2608 } 2609 if (so->so_error) /* temporary udp error */ 2610 return (1); 2611 if (((so->so_state & SS_ISCONNECTED) == 0) && 2612 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2613 return (0); 2614 if (kn->kn_sfflags & NOTE_LOWAT) 2615 return (kn->kn_data >= kn->kn_sdata); 2616 return (kn->kn_data >= so->so_snd.ssb_lowat); 2617 } 2618 2619 /*ARGSUSED*/ 2620 static int 2621 filt_solisten(struct knote *kn, long hint __unused) 2622 { 2623 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2624 int qlen = so->so_qlen; 2625 2626 if (soavailconn > 0 && qlen > soavailconn) 2627 qlen = soavailconn; 2628 kn->kn_data = qlen; 2629 2630 return (!TAILQ_EMPTY(&so->so_comp)); 2631 } 2632