/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

/*
 * Socket operation routines.
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate the socket and the
 * PCB at the same time, but I'm not convinced that all the protocols
 * can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
		    MSGF_DROPABLE | MSGF_PRIORITY,
		    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
    int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if the protocol does not provide its own method to initialize
	 * the default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
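
/*
 * The sketch below illustrates the calling convention described in the
 * NOTE above: socreate() hands back a referenced socket which the caller
 * must eventually dispose of, normally by wiring it to a descriptor or,
 * on failure, by calling soclose().  Hypothetical in-kernel consumer,
 * for illustration only; the function name is not part of this file.
 */
#if 0
static int
example_create_tcp_socket(struct thread *td, struct socket **sop)
{
	struct socket *so;
	int error;

	/* dom/type/proto have the same meaning as in socket(2) */
	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
	if (error)
		return (error);
	/* ... on any later failure path the reference is dropped via: */
	/* soclose(so, 0); */
	*sop = so;
	return (0);
}
#endif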

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
	/* TODO: assert accept queues are empty, after unix socket is fixed */

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
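
/*
 * Illustration of the backlog clamp in solisten() above, assuming the
 * default kern.ipc.somaxconn of 128 (hypothetical call sites, for
 * illustration only):
 */
#if 0
	solisten(so, -1, td);	/* negative      -> clamped to somaxconn */
	solisten(so, 10000, td);/* over the cap  -> clamped to somaxconn */
	solisten(so, 32, td);	/* within range  -> so_qlimit = 32 */
#endif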

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrate the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}
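
/*
 * The last-reference test in sofree() above relies on the fetch-and-add
 * idiom: atomic_fetchadd_int() returns the counter value *before* the
 * add, so seeing 1 means this caller just dropped the final reference
 * and owns the teardown.  A minimal sketch of the pattern (hypothetical
 * object and destructor, for illustration only):
 */
#if 0
	if (atomic_fetchadd_int(&obj->refs, -1) == 1) {
		/* we dropped the last reference; tear the object down */
		obj_destroy(obj);
	}
#endif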

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}

/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);	/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}
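
/*
 * A minimal sketch of the dispatch pattern soclose_fast() uses: bind a
 * netmsg to the socket, point it at a handler, and send it one-way to
 * the socket's protocol thread, where it runs serialized with all other
 * protocol processing for that socket.  The handler name is hypothetical,
 * for illustration only; a message targeted at netisr_apanic_rport must
 * never be replied to.
 */
#if 0
static void
example_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	/* ... asynchronous protocol-side work on (so) ... */
}

	netmsg_init(&so->so_clomsg, so, &netisr_apanic_rport, 0,
	    example_handler);
	lwkt_sendmsg(so->so_port, &so->so_clomsg.lmsg);
#endif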

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
    boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If the protocol is connection-based, we can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * the user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent an accumulated error from a previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in the mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
				    top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			    (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and there is nothing
				 * left to send, then use PRU_SEND_EOF instead
				 * of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

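/*
 * Userland view of the paths above (illustration only, assuming an
 * already-connected descriptor s): MSG_DONTWAIT maps to the EWOULDBLOCK
 * check, MSG_OOB takes the PRUS_OOB path with its 1024 byte grace space,
 * and an atomic (datagram) send larger than the send buffer fails
 * outright with EMSGSIZE.
 */
#if 0
	char buf[512];

	send(s, buf, sizeof(buf), MSG_DONTWAIT);	/* may fail EWOULDBLOCK */
	send(s, buf, 1, MSG_OOB);			/* TCP urgent data */
#endif
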
#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific
 * knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
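
/*
 * Worked example of the prepend optimization above, assuming
 * MCLBYTES == 2048 and max_hdr == 86 (the real values depend on the
 * kernel configuration): a 1000 byte datagram passes the test
 * uio_resid (1000) <= MCLBYTES - hdrlen (1962), so the payload is
 * copied hdrlen bytes into a single cluster and udp_output() can
 * prepend the UDP/IP headers in place instead of allocating another
 * mbuf in M_PREPEND().  A 2000 byte datagram fails the test and takes
 * the plain m_uiomove() path.
 */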

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP.
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif
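
/*
 * Userland consequence of the TCP-specific checks above (illustration
 * only): both MSG_EOR and non-empty control messages are rejected with
 * EINVAL before any data is queued.
 */
#if 0
	char buf[128];

	send(s, buf, sizeof(buf), MSG_EOR);	/* -> EINVAL on SOCK_STREAM/TCP */
#endif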

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

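/*
 * Userland view of the MSG_WAITALL logic above (illustration only):
 * a plain recv() may legally return a short count as soon as any data
 * is queued, while MSG_WAITALL keeps the loop above blocking until the
 * full request is satisfied, the connection ends, or a signal/timeout
 * cuts the wait short.
 */
#if 0
	char buf[4096];
	ssize_t n;

	n = recv(s, buf, sizeof(buf), MSG_WAITALL);
#endif
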
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
				    m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
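
/*
 * Userland mapping for soshutdown() (illustration only):
 */
#if 0
	shutdown(s, SHUT_RD);	/* discard receive side via sorflush() */
	shutdown(s, SHUT_WR);	/* so_pru_shutdown(): e.g. TCP sends FIN */
	shutdown(s, SHUT_RDWR);	/* both of the above */
#endif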

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */

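/*
 * Userland view of the accept filter plumbing above (illustration only,
 * assuming the accf_data module is available, whose filter name is
 * "dataready"): attach a filter so accept(2) only returns connections
 * that already have data queued.
 */
#if 0
	struct accept_filter_arg afa = { .af_name = "dataready" };

	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
#endif
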

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
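
/*
 * Usage sketch (illustrative only): a protocol pr_ctloutput() handler
 * typically copies a fixed-size integer option in like this; the
 * variable name is hypothetical:
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *
 * Passing len == minlen == sizeof(optval) rejects short buffers with
 * EINVAL while silently ignoring any excess the caller supplied.
 */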

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
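
/*
 * Worked example for the SO_SNDTIMEO/SO_RCVTIMEO conversion above,
 * assuming the common hz = 100 (so ustick = 10000 microseconds per
 * tick): a timeout of { tv_sec = 2, tv_usec = 500000 } becomes
 *
 *	val = 2 * 100 + 500000 / 10000 = 250 ticks
 *
 * and a nonzero sub-tick timeout such as { 0, 1 } computes to val == 0,
 * which the code rounds up to one tick so that it does not silently
 * become "wait forever".
 */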

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l,
					     sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
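
/*
 * Usage sketch (illustrative only): note that SO_ERROR above is
 * read-and-clear, so a userland caller observes a pending error at
 * most once.  This is the idiom used after a non-blocking connect(2):
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * err then holds the deferred connect error, if any, and the socket's
 * so_error has been reset to zero.
 */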

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* soopt_getm() should have allocated enough space */
		panic("ip6_sooptmcopyin");
	}
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the caller should have supplied a buffer large enough */
		/* to hold the entire answer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
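
/*
 * Usage sketch (illustrative only): the filters registered by
 * sokqfilter() are what a userland kevent(2) consumer ends up driving.
 * A minimal read-readiness registration looks roughly like:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * On a listening socket the same EVFILT_READ registration is routed to
 * solisten_filtops above and fires on completed connections rather
 * than on buffered data.
 */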

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
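
/*
 * Usage sketch (illustrative only): both filt_soread() and
 * filt_sowrite() honor NOTE_LOWAT, which lets a kevent(2) consumer
 * override the socket buffer's low-water mark per-knote.  For example,
 * to be woken only once at least 4096 bytes are readable:
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 4096, NULL);
 *
 * With NOTE_LOWAT set, the readiness comparison is made against
 * kn_sdata (4096 here) instead of so->so_rcv.ssb_lowat.
 */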