/*
 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");
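/*
 * The tunables above are all runtime-adjustable. A minimal userland
 * sketch (illustrative only, not part of this file) of inspecting and
 * raising kern.ipc.somaxconn, which bounds the listen(2) backlog that
 * solisten() below will accept:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int maxconn;
	size_t len = sizeof(maxconn);

	/* Read the current clamp applied to listen(2) backlogs. */
	if (sysctlbyname("kern.ipc.somaxconn", &maxconn, &len, NULL, 0) < 0)
		return (1);
	printf("kern.ipc.somaxconn = %d\n", maxconn);

	/* Raising it requires root; error handling is left to the caller. */
	maxconn = 1024;
	sysctlbyname("kern.ipc.somaxconn", NULL, NULL, &maxconn,
	    sizeof(maxconn));
	return (0);
}
#endif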
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
		    MSGF_DROPABLE | MSGF_PRIORITY,
		    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
    int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot. This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing. No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if the protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

#ifdef INVARIANTS
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}
#endif

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
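/*
 * A note on solisten() above: negative or oversized backlogs are silently
 * clamped to kern.ipc.somaxconn. A minimal userland sketch (illustrative
 * only) showing the effect from the caller's side:
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

int
make_listener(int port)
{
	struct sockaddr_in sin;
	int s;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s < 0)
		return (-1);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return (-1);

	/*
	 * A backlog of -1 (or anything above somaxconn) is clamped by
	 * solisten() to the somaxconn value; the call still succeeds.
	 */
	if (listen(s, -1) < 0)
		return (-1);
	return (s);
}
#endif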
/*
 * Destroy a disconnected socket. This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment. We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrate the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue. If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
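/*
 * soclose() above only takes the synchronous path when the socket is
 * connected and SO_LINGER is set (or fast close is disabled). A minimal
 * userland sketch (illustrative only) of arming a lingering close so that
 * close(2) blocks, up to the timeout, until queued data is sent:
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

void
lingering_close(int s)
{
	struct linger l;

	l.l_onoff = 1;		/* enable SO_LINGER */
	l.l_linger = 5;		/* block in close() for at most 5 seconds */
	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));

	/*
	 * With SO_LINGER armed on a connected socket, soclose() takes
	 * the soclose_sync() path and sleeps in "soclos" until the
	 * disconnect completes or the linger timer expires.
	 */
	close(s);
}
#endif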
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}

/*
 * Abort and destroy a socket. Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
    boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}
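/*
 * As the comment in soconnect() notes, a datagram socket can dissolve an
 * existing association by connecting to a "null address". A minimal
 * userland sketch (illustrative only) using AF_UNSPEC on a UDP socket:
 */
#if 0
#include <sys/socket.h>
#include <string.h>

void
udp_dissolve(int s)
{
	struct sockaddr sa;

	/*
	 * For connectionless protocols soconnect() first performs the
	 * sodisconnect(), so connecting to an AF_UNSPEC address removes
	 * the peer association instead of failing with EISCONN.
	 */
	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	connect(s, &sa, sizeof(sa));
}
#endif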
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not). Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING! resid is unsigned, space and len are signed. space
	 *	    can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
				    top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			    (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag, and nothing is left
				 * to send, then use PRU_SEND_EOF instead of
				 * PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date. We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc. We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens. We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
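/*
 * The sosend() contract above surfaces in userland as short counts: on
 * EINTR/ERESTART part of the data may already be queued. A minimal
 * writer sketch (illustrative only) that honors this:
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

ssize_t
send_all(int s, const char *buf, size_t len)
{
	size_t off = 0;
	ssize_t n;

	while (off < len) {
		n = send(s, buf + off, len - off, 0);
		if (n >= 0) {
			off += n;	/* partial writes are normal */
			continue;
		}
		if (errno == EINTR)
			continue;	/* signal: retry, data may be queued */
		if (errno == EWOULDBLOCK)
			break;		/* O_NONBLOCK: caller must poll */
		return (-1);		/* EPIPE, EMSGSIZE, etc. */
	}
	return ((ssize_t)off);
}
#endif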
#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
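/*
 * The constraints sosendudp() enforces are visible to userland: a datagram
 * larger than the socket buffer fails outright with EMSGSIZE, and an
 * unconnected socket with no destination gets EDESTADDRREQ. A minimal
 * sketch (illustrative only):
 */
#if 0
#include <sys/socket.h>
#include <errno.h>

int
udp_send_datagram(int s, const struct sockaddr *to, socklen_t tolen,
    const void *msg, size_t len)
{
	if (sendto(s, msg, len, 0, to, tolen) < 0) {
		if (errno == EMSGSIZE) {
			/*
			 * Datagram exceeds so_snd.ssb_hiwat (see SO_SNDBUF);
			 * unlike a stream send this never partially succeeds.
			 */
		} else if (errno == EDESTADDRREQ) {
			/* Unconnected socket and no address supplied. */
		}
		return (-1);
	}
	return (0);
}
#endif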
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING! resid is unsigned, space and len are signed. space
	 *	    can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date. We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc. We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens. We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif
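/*
 * sosendtcp() above rejects record-oriented and ancillary-data sends:
 * MSG_EOR and non-empty control messages both return EINVAL on TCP.
 * A minimal userland sketch (illustrative only) of the caller-visible
 * behavior:
 */
#if 0
#include <sys/socket.h>
#include <errno.h>

int
tcp_send_record(int s, const void *buf, size_t len)
{
	/* TCP is a byte stream; record boundaries are not supported. */
	if (send(s, buf, len, MSG_EOR) < 0 && errno == EINVAL)
		return (-1);	/* expected on SOCK_STREAM */
	return (0);
}
#endif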
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*. In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain. The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here. Cache the next record while
	 * cleaning up. Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
					    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination. If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup. If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
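/*
 * The MSG_WAITALL handling above still permits short reads on signal,
 * timeout, or EOF. A minimal userland reader sketch (illustrative only)
 * that accounts for this:
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

ssize_t
recv_exact(int s, void *buf, size_t len)
{
	ssize_t n;

	for (;;) {
		/*
		 * MSG_WAITALL asks soreceive() not to return until the
		 * request is filled, but a signal, an SO_RCVTIMEO timeout,
		 * or a peer close can still produce a short count.
		 */
		n = recv(s, buf, len, MSG_WAITALL);
		if (n < 0 && errno == EINTR)
			continue;
		return (n);
	}
}
#endif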
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well). The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here. Cache the next record while
	 * cleaning up. Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
				    m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup. If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down. Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */
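/*
 * A minimal userland sketch (illustrative only) of attaching an accept
 * filter via do_setopt_accept_filter() above. The "dataready" filter name
 * is an assumption; the corresponding filter module must be present in
 * the kernel and the socket must already be listening:
 */
#if 0
#include <sys/socket.h>
#include <string.h>

int
arm_accept_filter(int listen_fd)
{
	struct accept_filter_arg afa;

	memset(&afa, 0, sizeof(afa));
	strcpy(afa.af_name, "dataready");	/* assumed filter name */

	/* The kernel resolves the name with accept_filt_get(). */
	return (setsockopt(listen_fd, SOL_SOCKET, SO_ACCEPTFILTER,
	    &afa, sizeof(afa)));
}
#endif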
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
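/*
 * Illustrative sketch (hypothetical handler, not part of this file): a
 * protocol pr_ctloutput() SET handler typically pulls a fixed-size option
 * value into a kernel buffer exactly this way:
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	(validate optval, then apply it to the protocol control block)
 */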
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water mark is never greater
			 * than the high-water mark.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
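/*
 * Illustrative userland sketch (not part of this file): the conversion
 * above turns the timeval into ticks and bounds tv_sec against
 * INT_MAX / hz, so a simple receive timeout is set like this:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		warn("SO_RCVTIMEO");
 */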
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;	/* SO_ERROR reads and clears */
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l,
					     sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
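/*
 * Illustrative userland sketch (not part of this file): because the
 * SO_ERROR case above clears so_error after reporting it, the usual
 * non-blocking connect(2) completion check consumes the pending error
 * exactly once:
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
 *		errno = err;		(connect failed asynchronously)
 */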
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the chain should have been sized by soopt_getm() */
		panic("ip6_sooptmcopyin");
	}
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be given by user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
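/*
 * Illustrative sketch of how these compatibility shims pair up (the
 * surrounding legacy option handler is hypothetical): a caller sizes an
 * mbuf chain to the sockopt, fills it in, hands the chain to code that
 * expects mbufs, then copies the result back out.
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);		(chain for sopt_valsize bytes)
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	(sopt -> mbuf chain)
 *	(legacy handler consumes or refills the chain here)
 *	if (error == 0)
 *		error = soopt_mcopyout(sopt, m);(mbuf chain -> sopt)
 */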
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as the KNOTE hint here:
	 * the soread filter depends on so_oobmark and the SS_RCVATMARK
	 * so_state.  NOTE_OOB would cause an unnecessary penalty in
	 * KNOTE, if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
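/*
 * Illustrative userland sketch (not part of this file): the NOTE_LOWAT
 * handling in filt_soread() above lets a kevent(2) consumer ask not to
 * be woken until at least a given number of bytes are readable:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
 *		err(1, "kevent");
 */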