1 /* 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
161 * Note that it would probably be better to allocate socket 162 * and PCB at the same time, but I'm not convinced that all 163 * the protocols can be easily modified to do this. 164 */ 165 struct socket * 166 soalloc(int waitok, struct protosw *pr) 167 { 168 struct socket *so; 169 unsigned waitmask; 170 171 waitmask = waitok ? M_WAITOK : M_NOWAIT; 172 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 173 if (so) { 174 /* XXX race condition for reentrant kernel */ 175 so->so_proto = pr; 176 TAILQ_INIT(&so->so_aiojobq); 177 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist); 178 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist); 179 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 180 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 181 spin_init(&so->so_rcvd_spin, "soalloc"); 182 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 183 MSGF_DROPABLE | MSGF_PRIORITY, 184 so->so_proto->pr_usrreqs->pru_rcvd); 185 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 186 so->so_state = SS_NOFDREF; 187 so->so_refs = 1; 188 } 189 return so; 190 } 191 192 int 193 socreate(int dom, struct socket **aso, int type, 194 int proto, struct thread *td) 195 { 196 struct proc *p = td->td_proc; 197 struct protosw *prp; 198 struct socket *so; 199 struct pru_attach_info ai; 200 int error; 201 202 if (proto) 203 prp = pffindproto(dom, proto, type); 204 else 205 prp = pffindtype(dom, type); 206 207 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 208 return (EPROTONOSUPPORT); 209 210 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 211 prp->pr_domain->dom_family != PF_LOCAL && 212 prp->pr_domain->dom_family != PF_INET && 213 prp->pr_domain->dom_family != PF_INET6 && 214 prp->pr_domain->dom_family != PF_ROUTE) { 215 return (EPROTONOSUPPORT); 216 } 217 218 if (prp->pr_type != type) 219 return (EPROTOTYPE); 220 so = soalloc(p != NULL, prp); 221 if (so == NULL) 222 return (ENOBUFS); 223 224 /* 225 * Callers of socreate() presumably will connect up a descriptor 226 * and call soclose() if they cannot. This represents our so_refs 227 * (which should be 1) from soalloc(). 228 */ 229 soclrstate(so, SS_NOFDREF); 230 231 /* 232 * Set a default port for protocol processing. No action will occur 233 * on the socket on this port until an inpcb is attached to it and 234 * is able to match incoming packets, or until the socket becomes 235 * available to userland. 236 * 237 * We normally default the socket to the protocol thread on cpu 0, 238 * if protocol does not provide its own method to initialize the 239 * default port. 240 * 241 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 242 * thread and all pr_*()/pru_*() calls are executed synchronously. 243 */ 244 if (prp->pr_flags & PR_SYNC_PORT) 245 so->so_port = &netisr_sync_port; 246 else if (prp->pr_initport != NULL) 247 so->so_port = prp->pr_initport(); 248 else 249 so->so_port = netisr_cpuport(0); 250 251 TAILQ_INIT(&so->so_incomp); 252 TAILQ_INIT(&so->so_comp); 253 so->so_type = type; 254 so->so_cred = crhold(p->p_ucred); 255 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 256 ai.p_ucred = p->p_ucred; 257 ai.fd_rdir = p->p_fd->fd_rdir; 258 259 /* 260 * Auto-sizing of socket buffers is managed by the protocols and 261 * the appropriate flags must be set in the pru_attach function. 262 */ 263 error = so_pru_attach(so, proto, &ai); 264 if (error) { 265 sosetstate(so, SS_NOFDREF); 266 sofree(so); /* from soalloc */ 267 return error; 268 } 269 270 /* 271 * NOTE: Returns referenced socket. 
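	 *
	 * For example (illustrative sketch only, not from the original
	 * source; error handling abbreviated), a hypothetical in-kernel
	 * caller would typically do:
	 *
	 *	struct socket *so;
	 *	int error;
	 *
	 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	 *			 curthread);
	 *	if (error == 0) {
	 *		... connect up a descriptor or use the socket ...
	 *		soclose(so, 0);	(if the descriptor cannot be set up)
	 *	}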
272 */ 273 *aso = so; 274 return (0); 275 } 276 277 int 278 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 279 { 280 int error; 281 282 error = so_pru_bind(so, nam, td); 283 return (error); 284 } 285 286 static void 287 sodealloc(struct socket *so) 288 { 289 if (so->so_rcv.ssb_hiwat) 290 (void)chgsbsize(so->so_cred->cr_uidinfo, 291 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 292 if (so->so_snd.ssb_hiwat) 293 (void)chgsbsize(so->so_cred->cr_uidinfo, 294 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 295 #ifdef INET 296 /* remove accept filter if present */ 297 if (so->so_accf != NULL) 298 do_setopt_accept_filter(so, NULL); 299 #endif /* INET */ 300 crfree(so->so_cred); 301 if (so->so_faddr != NULL) 302 kfree(so->so_faddr, M_SONAME); 303 kfree(so, M_SOCKET); 304 } 305 306 int 307 solisten(struct socket *so, int backlog, struct thread *td) 308 { 309 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 310 return (EINVAL); 311 312 lwkt_gettoken(&so->so_rcv.ssb_token); 313 if (TAILQ_EMPTY(&so->so_comp)) 314 so->so_options |= SO_ACCEPTCONN; 315 lwkt_reltoken(&so->so_rcv.ssb_token); 316 if (backlog < 0 || backlog > somaxconn) 317 backlog = somaxconn; 318 so->so_qlimit = backlog; 319 return so_pru_listen(so, td); 320 } 321 322 /* 323 * Destroy a disconnected socket. This routine is a NOP if entities 324 * still have a reference on the socket: 325 * 326 * so_pcb - The protocol stack still has a reference 327 * SS_NOFDREF - There is no longer a file pointer reference 328 */ 329 void 330 sofree(struct socket *so) 331 { 332 struct socket *head; 333 334 /* 335 * This is a bit hackish at the moment. We need to interlock 336 * any accept queue we are on before we potentially lose the 337 * last reference to avoid races against a re-reference from 338 * someone operating on the queue. 339 */ 340 while ((head = so->so_head) != NULL) { 341 lwkt_getpooltoken(head); 342 if (so->so_head == head) 343 break; 344 lwkt_relpooltoken(head); 345 } 346 347 /* 348 * Arbitrage the last free. 349 */ 350 KKASSERT(so->so_refs > 0); 351 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 352 if (head) 353 lwkt_relpooltoken(head); 354 return; 355 } 356 357 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 358 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 359 360 /* 361 * We're done, remove ourselves from the accept queue we are 362 * on, if we are on one. 363 */ 364 if (head != NULL) { 365 if (so->so_state & SS_INCOMP) { 366 TAILQ_REMOVE(&head->so_incomp, so, so_list); 367 head->so_incqlen--; 368 } else if (so->so_state & SS_COMP) { 369 /* 370 * We must not decommission a socket that's 371 * on the accept(2) queue. If we do, then 372 * accept(2) may hang after select(2) indicated 373 * that the listening socket was ready. 374 */ 375 lwkt_relpooltoken(head); 376 return; 377 } else { 378 panic("sofree: not queued"); 379 } 380 soclrstate(so, SS_INCOMP); 381 so->so_head = NULL; 382 lwkt_relpooltoken(head); 383 } 384 ssb_release(&so->so_snd, so); 385 sorflush(so); 386 sodealloc(so); 387 } 388 389 /* 390 * Close a socket on last file table reference removal. 391 * Initiate disconnect if connected. 392 * Free socket when disconnect complete. 
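 *
 * The synchronous path (soclose_sync) is taken when the soclose_fast
 * sysctl is disabled, when the protocol uses a synchronous port
 * (PR_SYNC_PORT), or when SO_LINGER is set on a connected socket;
 * otherwise the teardown is handed off to the protocol thread
 * asynchronously (soclose_fast).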
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			soclrstate(sp, SS_INCOMP);
			sp->so_head = NULL;
			so->so_incqlen--;
			soabort_async(sp);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			soclrstate(sp, SS_COMP);
			sp->so_head = NULL;
			so->so_qlen--;
			soabort_async(sp);
		}
	}
	lwkt_relpooltoken(so);

	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}

void
soinherit(struct socket *so, struct socket *so_inh)
{
	TAILQ_HEAD(, socket) comp, incomp;
	struct socket *sp;
	int qlen, incqlen;

	KASSERT(so->so_options & SO_ACCEPTCONN,
	    ("so does not accept connection"));
	KASSERT(so_inh->so_options & SO_ACCEPTCONN,
	    ("so_inh does not accept connection"));

	TAILQ_INIT(&comp);
	TAILQ_INIT(&incomp);

	lwkt_getpooltoken(so);
	lwkt_getpooltoken(so_inh);

	/*
	 * Save the completed queue and the incomplete queue.
	 */
	TAILQ_CONCAT(&comp, &so->so_comp, so_list);
	qlen = so->so_qlen;
	so->so_qlen = 0;

	TAILQ_CONCAT(&incomp, &so->so_incomp, so_list);
	incqlen = so->so_incqlen;
	so->so_incqlen = 0;

	/*
	 * Append the saved completed queue and incomplete
	 * queue to the socket that inherits them.
	 *
	 * XXX
	 * This may temporarily break the inheriting socket's
	 * so_qlimit.
	 */
	TAILQ_FOREACH(sp, &comp, so_list) {
		sp->so_head = so_inh;
		crfree(sp->so_cred);
		sp->so_cred = crhold(so_inh->so_cred);
	}

	TAILQ_FOREACH(sp, &incomp, so_list) {
		sp->so_head = so_inh;
		crfree(sp->so_cred);
		sp->so_cred = crhold(so_inh->so_cred);
	}

	TAILQ_CONCAT(&so_inh->so_comp, &comp, so_list);
	so_inh->so_qlen += qlen;

	TAILQ_CONCAT(&so_inh->so_incomp, &incomp, so_list);
	so_inh->so_incqlen += incqlen;

	lwkt_relpooltoken(so_inh);
	lwkt_relpooltoken(so);

	if (qlen) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(so_inh);
		wakeup(&so_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
543 */ 544 return error; 545 } 546 if (error == 0) 547 error = error2; 548 } 549 discard: 550 sodiscard(so); 551 so_pru_sync(so); /* unpend async sending */ 552 sofree(so); /* dispose of ref */ 553 554 return (error); 555 } 556 557 static void 558 soclose_sofree_async_handler(netmsg_t msg) 559 { 560 sofree(msg->base.nm_so); 561 } 562 563 static void 564 soclose_sofree_async(struct socket *so) 565 { 566 struct netmsg_base *base = &so->so_clomsg; 567 568 netmsg_init(base, so, &netisr_apanic_rport, 0, 569 soclose_sofree_async_handler); 570 lwkt_sendmsg(so->so_port, &base->lmsg); 571 } 572 573 static void 574 soclose_disconn_async_handler(netmsg_t msg) 575 { 576 struct socket *so = msg->base.nm_so; 577 578 if ((so->so_state & SS_ISCONNECTED) && 579 (so->so_state & SS_ISDISCONNECTING) == 0) 580 so_pru_disconnect_direct(so); 581 582 if (so->so_pcb) { 583 int error; 584 585 error = so_pru_detach_direct(so); 586 if (error == EJUSTRETURN) { 587 /* 588 * Protocol will call sodiscard() 589 * and sofree() for us. 590 */ 591 return; 592 } 593 } 594 595 sodiscard(so); 596 sofree(so); 597 } 598 599 static void 600 soclose_disconn_async(struct socket *so) 601 { 602 struct netmsg_base *base = &so->so_clomsg; 603 604 netmsg_init(base, so, &netisr_apanic_rport, 0, 605 soclose_disconn_async_handler); 606 lwkt_sendmsg(so->so_port, &base->lmsg); 607 } 608 609 static void 610 soclose_detach_async_handler(netmsg_t msg) 611 { 612 struct socket *so = msg->base.nm_so; 613 614 if (so->so_pcb) { 615 int error; 616 617 error = so_pru_detach_direct(so); 618 if (error == EJUSTRETURN) { 619 /* 620 * Protocol will call sodiscard() 621 * and sofree() for us. 622 */ 623 return; 624 } 625 } 626 627 sodiscard(so); 628 sofree(so); 629 } 630 631 static void 632 soclose_detach_async(struct socket *so) 633 { 634 struct netmsg_base *base = &so->so_clomsg; 635 636 netmsg_init(base, so, &netisr_apanic_rport, 0, 637 soclose_detach_async_handler); 638 lwkt_sendmsg(so->so_port, &base->lmsg); 639 } 640 641 static void 642 soclose_fast(struct socket *so) 643 { 644 if (so->so_pcb == NULL) 645 goto discard; 646 647 if ((so->so_state & SS_ISCONNECTED) && 648 (so->so_state & SS_ISDISCONNECTING) == 0) { 649 soclose_disconn_async(so); 650 return; 651 } 652 653 if (so->so_pcb) { 654 soclose_detach_async(so); 655 return; 656 } 657 658 discard: 659 sodiscard(so); 660 soclose_sofree_async(so); 661 } 662 663 /* 664 * Abort and destroy a socket. Only one abort can be in progress 665 * at any given moment. 666 */ 667 void 668 soabort(struct socket *so) 669 { 670 soreference(so); 671 so_pru_abort(so); 672 } 673 674 void 675 soabort_async(struct socket *so) 676 { 677 soreference(so); 678 so_pru_abort_async(so); 679 } 680 681 void 682 soabort_oncpu(struct socket *so) 683 { 684 soreference(so); 685 so_pru_abort_direct(so); 686 } 687 688 /* 689 * so is passed in ref'd, which becomes owned by 690 * the cleared SS_NOFDREF flag. 
691 */ 692 void 693 soaccept_generic(struct socket *so) 694 { 695 if ((so->so_state & SS_NOFDREF) == 0) 696 panic("soaccept: !NOFDREF"); 697 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 698 } 699 700 int 701 soaccept(struct socket *so, struct sockaddr **nam) 702 { 703 int error; 704 705 soaccept_generic(so); 706 error = so_pru_accept(so, nam); 707 return (error); 708 } 709 710 int 711 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td, 712 boolean_t sync) 713 { 714 int error; 715 716 if (so->so_options & SO_ACCEPTCONN) 717 return (EOPNOTSUPP); 718 /* 719 * If protocol is connection-based, can only connect once. 720 * Otherwise, if connected, try to disconnect first. 721 * This allows user to disconnect by connecting to, e.g., 722 * a null address. 723 */ 724 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 725 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 726 (error = sodisconnect(so)))) { 727 error = EISCONN; 728 } else { 729 /* 730 * Prevent accumulated error from previous connection 731 * from biting us. 732 */ 733 so->so_error = 0; 734 if (!sync && so->so_proto->pr_usrreqs->pru_preconnect) 735 error = so_pru_connect_async(so, nam, td); 736 else 737 error = so_pru_connect(so, nam, td); 738 } 739 return (error); 740 } 741 742 int 743 soconnect2(struct socket *so1, struct socket *so2) 744 { 745 int error; 746 747 error = so_pru_connect2(so1, so2); 748 return (error); 749 } 750 751 int 752 sodisconnect(struct socket *so) 753 { 754 int error; 755 756 if ((so->so_state & SS_ISCONNECTED) == 0) { 757 error = ENOTCONN; 758 goto bad; 759 } 760 if (so->so_state & SS_ISDISCONNECTING) { 761 error = EALREADY; 762 goto bad; 763 } 764 error = so_pru_disconnect(so); 765 bad: 766 return (error); 767 } 768 769 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 770 /* 771 * Send on a socket. 772 * If send must go all at once and message is larger than 773 * send buffering, then hard error. 774 * Lock against other senders. 775 * If must go all at once and not enough room now, then 776 * inform user that this would block and do nothing. 777 * Otherwise, if nonblocking, send as much as possible. 778 * The data to be sent is described by "uio" if nonzero, 779 * otherwise by the mbuf chain "top" (which must be null 780 * if uio is not). Data provided in mbuf chain must be small 781 * enough to send all at once. 782 * 783 * Returns nonzero on error, timeout or signal; callers 784 * must check for short counts if EINTR/ERESTART are returned. 785 * Data and control buffers are freed on return. 786 */ 787 int 788 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 789 struct mbuf *top, struct mbuf *control, int flags, 790 struct thread *td) 791 { 792 struct mbuf **mp; 793 struct mbuf *m; 794 size_t resid; 795 int space, len; 796 int clen = 0, error, dontroute, mlen; 797 int atomic = sosendallatonce(so) || top; 798 int pru_flags; 799 800 if (uio) { 801 resid = uio->uio_resid; 802 } else { 803 resid = (size_t)top->m_pkthdr.len; 804 #ifdef INVARIANTS 805 len = 0; 806 for (m = top; m; m = m->m_next) 807 len += m->m_len; 808 KKASSERT(top->m_pkthdr.len == len); 809 #endif 810 } 811 812 /* 813 * WARNING! resid is unsigned, space and len are signed. space 814 * can wind up negative if the sockbuf is overcommitted. 815 * 816 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 817 * type sockets since that's an error. 
818 */ 819 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 820 error = EINVAL; 821 goto out; 822 } 823 824 dontroute = 825 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 826 (so->so_proto->pr_flags & PR_ATOMIC); 827 if (td->td_lwp != NULL) 828 td->td_lwp->lwp_ru.ru_msgsnd++; 829 if (control) 830 clen = control->m_len; 831 #define gotoerr(errcode) { error = errcode; goto release; } 832 833 restart: 834 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 835 if (error) 836 goto out; 837 838 do { 839 if (so->so_state & SS_CANTSENDMORE) 840 gotoerr(EPIPE); 841 if (so->so_error) { 842 error = so->so_error; 843 so->so_error = 0; 844 goto release; 845 } 846 if ((so->so_state & SS_ISCONNECTED) == 0) { 847 /* 848 * `sendto' and `sendmsg' is allowed on a connection- 849 * based socket if it supports implied connect. 850 * Return ENOTCONN if not connected and no address is 851 * supplied. 852 */ 853 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 854 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 855 if ((so->so_state & SS_ISCONFIRMING) == 0 && 856 !(resid == 0 && clen != 0)) 857 gotoerr(ENOTCONN); 858 } else if (addr == NULL) 859 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 860 ENOTCONN : EDESTADDRREQ); 861 } 862 if ((atomic && resid > so->so_snd.ssb_hiwat) || 863 clen > so->so_snd.ssb_hiwat) { 864 gotoerr(EMSGSIZE); 865 } 866 space = ssb_space(&so->so_snd); 867 if (flags & MSG_OOB) 868 space += 1024; 869 if ((space < 0 || (size_t)space < resid + clen) && uio && 870 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 871 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 872 gotoerr(EWOULDBLOCK); 873 ssb_unlock(&so->so_snd); 874 error = ssb_wait(&so->so_snd); 875 if (error) 876 goto out; 877 goto restart; 878 } 879 mp = ⊤ 880 space -= clen; 881 do { 882 if (uio == NULL) { 883 /* 884 * Data is prepackaged in "top". 885 */ 886 resid = 0; 887 if (flags & MSG_EOR) 888 top->m_flags |= M_EOR; 889 } else do { 890 if (resid > INT_MAX) 891 resid = INT_MAX; 892 m = m_getl((int)resid, M_WAITOK, MT_DATA, 893 top == NULL ? M_PKTHDR : 0, &mlen); 894 if (top == NULL) { 895 m->m_pkthdr.len = 0; 896 m->m_pkthdr.rcvif = NULL; 897 } 898 len = imin((int)szmin(mlen, resid), space); 899 if (resid < MINCLSIZE) { 900 /* 901 * For datagram protocols, leave room 902 * for protocol headers in first mbuf. 903 */ 904 if (atomic && top == NULL && len < mlen) 905 MH_ALIGN(m, len); 906 } 907 space -= len; 908 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 909 resid = uio->uio_resid; 910 m->m_len = len; 911 *mp = m; 912 top->m_pkthdr.len += len; 913 if (error) 914 goto release; 915 mp = &m->m_next; 916 if (resid == 0) { 917 if (flags & MSG_EOR) 918 top->m_flags |= M_EOR; 919 break; 920 } 921 } while (space > 0 && atomic); 922 if (dontroute) 923 so->so_options |= SO_DONTROUTE; 924 if (flags & MSG_OOB) { 925 pru_flags = PRUS_OOB; 926 } else if ((flags & MSG_EOF) && 927 (so->so_proto->pr_flags & PR_IMPLOPCL) && 928 (resid == 0)) { 929 /* 930 * If the user set MSG_EOF, the protocol 931 * understands this flag and nothing left to 932 * send then use PRU_SEND_EOF instead of PRU_SEND. 933 */ 934 pru_flags = PRUS_EOF; 935 } else if (resid > 0 && space > 0) { 936 /* If there is more to send, set PRUS_MORETOCOME */ 937 pru_flags = PRUS_MORETOCOME; 938 } else { 939 pru_flags = 0; 940 } 941 /* 942 * XXX all the SS_CANTSENDMORE checks previously 943 * done could be out of date. We could have recieved 944 * a reset packet in an interrupt or maybe we slept 945 * while doing page faults in uiomove() etc. 
We could 946 * probably recheck again inside the splnet() protection 947 * here, but there are probably other places that this 948 * also happens. We must rethink this. 949 */ 950 error = so_pru_send(so, pru_flags, top, addr, control, td); 951 if (dontroute) 952 so->so_options &= ~SO_DONTROUTE; 953 clen = 0; 954 control = NULL; 955 top = NULL; 956 mp = ⊤ 957 if (error) 958 goto release; 959 } while (resid && space > 0); 960 } while (resid); 961 962 release: 963 ssb_unlock(&so->so_snd); 964 out: 965 if (top) 966 m_freem(top); 967 if (control) 968 m_freem(control); 969 return (error); 970 } 971 972 #ifdef INET 973 /* 974 * A specialization of sosend() for UDP based on protocol-specific knowledge: 975 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 976 * sosendallatonce() returns true, 977 * the "atomic" variable is true, 978 * and sosendudp() blocks until space is available for the entire send. 979 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 980 * PR_IMPLOPCL flags set. 981 * UDP has no out-of-band data. 982 * UDP has no control data. 983 * UDP does not support MSG_EOR. 984 */ 985 int 986 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 987 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 988 { 989 size_t resid; 990 int error, pru_flags = 0; 991 int space; 992 993 if (td->td_lwp != NULL) 994 td->td_lwp->lwp_ru.ru_msgsnd++; 995 if (control) 996 m_freem(control); 997 998 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 999 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len; 1000 1001 restart: 1002 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1003 if (error) 1004 goto out; 1005 1006 if (so->so_state & SS_CANTSENDMORE) 1007 gotoerr(EPIPE); 1008 if (so->so_error) { 1009 error = so->so_error; 1010 so->so_error = 0; 1011 goto release; 1012 } 1013 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 1014 gotoerr(EDESTADDRREQ); 1015 if (resid > so->so_snd.ssb_hiwat) 1016 gotoerr(EMSGSIZE); 1017 space = ssb_space(&so->so_snd); 1018 if (uio && (space < 0 || (size_t)space < resid)) { 1019 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1020 gotoerr(EWOULDBLOCK); 1021 ssb_unlock(&so->so_snd); 1022 error = ssb_wait(&so->so_snd); 1023 if (error) 1024 goto out; 1025 goto restart; 1026 } 1027 1028 if (uio) { 1029 int hdrlen = max_hdr; 1030 1031 /* 1032 * We try to optimize out the additional mbuf 1033 * allocations in M_PREPEND() on output path, e.g. 1034 * - udp_output(), when it tries to prepend protocol 1035 * headers. 1036 * - Link layer output function, when it tries to 1037 * prepend link layer header. 1038 * 1039 * This probably will not benefit any data that will 1040 * be fragmented, so this optimization is only performed 1041 * when the size of data and max size of protocol+link 1042 * headers fit into one mbuf cluster. 
1043 */ 1044 if (uio->uio_resid > MCLBYTES - hdrlen || 1045 !udp_sosend_prepend) { 1046 top = m_uiomove(uio); 1047 if (top == NULL) 1048 goto release; 1049 } else { 1050 int nsize; 1051 1052 top = m_getl(uio->uio_resid + hdrlen, M_WAITOK, 1053 MT_DATA, M_PKTHDR, &nsize); 1054 KASSERT(nsize >= uio->uio_resid + hdrlen, 1055 ("sosendudp invalid nsize %d, " 1056 "resid %zu, hdrlen %d", 1057 nsize, uio->uio_resid, hdrlen)); 1058 1059 top->m_len = uio->uio_resid; 1060 top->m_pkthdr.len = uio->uio_resid; 1061 top->m_data += hdrlen; 1062 1063 error = uiomove(mtod(top, caddr_t), top->m_len, uio); 1064 if (error) 1065 goto out; 1066 } 1067 } 1068 1069 if (flags & MSG_DONTROUTE) 1070 pru_flags |= PRUS_DONTROUTE; 1071 1072 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 1073 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 1074 error = 0; 1075 } else { 1076 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 1077 } 1078 top = NULL; /* sent or freed in lower layer */ 1079 1080 release: 1081 ssb_unlock(&so->so_snd); 1082 out: 1083 if (top) 1084 m_freem(top); 1085 return (error); 1086 } 1087 1088 int 1089 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 1090 struct mbuf *top, struct mbuf *control, int flags, 1091 struct thread *td) 1092 { 1093 struct mbuf **mp; 1094 struct mbuf *m; 1095 size_t resid; 1096 int space, len; 1097 int error, mlen; 1098 int allatonce; 1099 int pru_flags; 1100 1101 if (uio) { 1102 KKASSERT(top == NULL); 1103 allatonce = 0; 1104 resid = uio->uio_resid; 1105 } else { 1106 allatonce = 1; 1107 resid = (size_t)top->m_pkthdr.len; 1108 #ifdef INVARIANTS 1109 len = 0; 1110 for (m = top; m; m = m->m_next) 1111 len += m->m_len; 1112 KKASSERT(top->m_pkthdr.len == len); 1113 #endif 1114 } 1115 1116 /* 1117 * WARNING! resid is unsigned, space and len are signed. space 1118 * can wind up negative if the sockbuf is overcommitted. 1119 * 1120 * Also check to make sure that MSG_EOR isn't used on TCP 1121 */ 1122 if (flags & MSG_EOR) { 1123 error = EINVAL; 1124 goto out; 1125 } 1126 1127 if (control) { 1128 /* TCP doesn't do control messages (rights, creds, etc) */ 1129 if (control->m_len) { 1130 error = EINVAL; 1131 goto out; 1132 } 1133 m_freem(control); /* empty control, just free it */ 1134 control = NULL; 1135 } 1136 1137 if (td->td_lwp != NULL) 1138 td->td_lwp->lwp_ru.ru_msgsnd++; 1139 1140 #define gotoerr(errcode) { error = errcode; goto release; } 1141 1142 restart: 1143 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1144 if (error) 1145 goto out; 1146 1147 do { 1148 if (so->so_state & SS_CANTSENDMORE) 1149 gotoerr(EPIPE); 1150 if (so->so_error) { 1151 error = so->so_error; 1152 so->so_error = 0; 1153 goto release; 1154 } 1155 if ((so->so_state & SS_ISCONNECTED) == 0 && 1156 (so->so_state & SS_ISCONFIRMING) == 0) 1157 gotoerr(ENOTCONN); 1158 if (allatonce && resid > so->so_snd.ssb_hiwat) 1159 gotoerr(EMSGSIZE); 1160 1161 space = ssb_space_prealloc(&so->so_snd); 1162 if (flags & MSG_OOB) 1163 space += 1024; 1164 if ((space < 0 || (size_t)space < resid) && !allatonce && 1165 space < so->so_snd.ssb_lowat) { 1166 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1167 gotoerr(EWOULDBLOCK); 1168 ssb_unlock(&so->so_snd); 1169 error = ssb_wait(&so->so_snd); 1170 if (error) 1171 goto out; 1172 goto restart; 1173 } 1174 mp = ⊤ 1175 do { 1176 int cnt = 0, async = 0; 1177 1178 if (uio == NULL) { 1179 /* 1180 * Data is prepackaged in "top". 
1181 */ 1182 resid = 0; 1183 } else do { 1184 if (resid > INT_MAX) 1185 resid = INT_MAX; 1186 if (tcp_sosend_jcluster) { 1187 m = m_getlj((int)resid, M_WAITOK, MT_DATA, 1188 top == NULL ? M_PKTHDR : 0, &mlen); 1189 } else { 1190 m = m_getl((int)resid, M_WAITOK, MT_DATA, 1191 top == NULL ? M_PKTHDR : 0, &mlen); 1192 } 1193 if (top == NULL) { 1194 m->m_pkthdr.len = 0; 1195 m->m_pkthdr.rcvif = NULL; 1196 } 1197 len = imin((int)szmin(mlen, resid), space); 1198 space -= len; 1199 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1200 resid = uio->uio_resid; 1201 m->m_len = len; 1202 *mp = m; 1203 top->m_pkthdr.len += len; 1204 if (error) 1205 goto release; 1206 mp = &m->m_next; 1207 if (resid == 0) 1208 break; 1209 ++cnt; 1210 } while (space > 0 && cnt < tcp_sosend_agglim); 1211 1212 if (tcp_sosend_async) 1213 async = 1; 1214 1215 if (flags & MSG_OOB) { 1216 pru_flags = PRUS_OOB; 1217 async = 0; 1218 } else if ((flags & MSG_EOF) && resid == 0) { 1219 pru_flags = PRUS_EOF; 1220 } else if (resid > 0 && space > 0) { 1221 /* If there is more to send, set PRUS_MORETOCOME */ 1222 pru_flags = PRUS_MORETOCOME; 1223 async = 1; 1224 } else { 1225 pru_flags = 0; 1226 } 1227 1228 if (flags & MSG_SYNC) 1229 async = 0; 1230 1231 /* 1232 * XXX all the SS_CANTSENDMORE checks previously 1233 * done could be out of date. We could have recieved 1234 * a reset packet in an interrupt or maybe we slept 1235 * while doing page faults in uiomove() etc. We could 1236 * probably recheck again inside the splnet() protection 1237 * here, but there are probably other places that this 1238 * also happens. We must rethink this. 1239 */ 1240 for (m = top; m; m = m->m_next) 1241 ssb_preallocstream(&so->so_snd, m); 1242 if (!async) { 1243 error = so_pru_send(so, pru_flags, top, 1244 NULL, NULL, td); 1245 } else { 1246 so_pru_send_async(so, pru_flags, top, 1247 NULL, NULL, td); 1248 error = 0; 1249 } 1250 1251 top = NULL; 1252 mp = ⊤ 1253 if (error) 1254 goto release; 1255 } while (resid && space > 0); 1256 } while (resid); 1257 1258 release: 1259 ssb_unlock(&so->so_snd); 1260 out: 1261 if (top) 1262 m_freem(top); 1263 if (control) 1264 m_freem(control); 1265 return (error); 1266 } 1267 #endif 1268 1269 /* 1270 * Implement receive operations on a socket. 1271 * 1272 * We depend on the way that records are added to the signalsockbuf 1273 * by sbappend*. In particular, each record (mbufs linked through m_next) 1274 * must begin with an address if the protocol so specifies, 1275 * followed by an optional mbuf or mbufs containing ancillary data, 1276 * and then zero or more mbufs of data. 1277 * 1278 * Although the signalsockbuf is locked, new data may still be appended. 1279 * A token inside the ssb_lock deals with MP issues and still allows 1280 * the network to access the socket if we block in a uio. 1281 * 1282 * The caller may receive the data as a single mbuf chain by supplying 1283 * an mbuf **mp0 for use in returning the chain. The uio is then used 1284 * only for the count in uio_resid. 
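 *
 * For example (illustrative sketch only), a caller that just wants the
 * data copied out through a previously prepared uio might use:
 *
 *	int flags = 0;
 *
 *	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);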
1285 */ 1286 int 1287 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1288 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1289 { 1290 struct mbuf *m, *n; 1291 struct mbuf *free_chain = NULL; 1292 int flags, len, error, offset; 1293 struct protosw *pr = so->so_proto; 1294 int moff, type = 0; 1295 size_t resid, orig_resid; 1296 1297 if (uio) 1298 resid = uio->uio_resid; 1299 else 1300 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1301 orig_resid = resid; 1302 1303 if (psa) 1304 *psa = NULL; 1305 if (controlp) 1306 *controlp = NULL; 1307 if (flagsp) 1308 flags = *flagsp &~ MSG_EOR; 1309 else 1310 flags = 0; 1311 if (flags & MSG_OOB) { 1312 m = m_get(M_WAITOK, MT_DATA); 1313 if (m == NULL) 1314 return (ENOBUFS); 1315 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1316 if (error) 1317 goto bad; 1318 if (sio) { 1319 do { 1320 sbappend(sio, m); 1321 KKASSERT(resid >= (size_t)m->m_len); 1322 resid -= (size_t)m->m_len; 1323 } while (resid > 0 && m); 1324 } else { 1325 do { 1326 uio->uio_resid = resid; 1327 error = uiomove(mtod(m, caddr_t), 1328 (int)szmin(resid, m->m_len), 1329 uio); 1330 resid = uio->uio_resid; 1331 m = m_free(m); 1332 } while (uio->uio_resid && error == 0 && m); 1333 } 1334 bad: 1335 if (m) 1336 m_freem(m); 1337 return (error); 1338 } 1339 if ((so->so_state & SS_ISCONFIRMING) && resid) 1340 so_pru_rcvd(so, 0); 1341 1342 /* 1343 * The token interlocks against the protocol thread while 1344 * ssb_lock is a blocking lock against other userland entities. 1345 */ 1346 lwkt_gettoken(&so->so_rcv.ssb_token); 1347 restart: 1348 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1349 if (error) 1350 goto done; 1351 1352 m = so->so_rcv.ssb_mb; 1353 /* 1354 * If we have less data than requested, block awaiting more 1355 * (subject to any timeout) if: 1356 * 1. the current count is less than the low water mark, or 1357 * 2. MSG_WAITALL is set, and it is possible to do the entire 1358 * receive operation at once if we block (resid <= hiwat). 1359 * 3. MSG_DONTWAIT is not set 1360 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1361 * we have to do the receive in sections, and thus risk returning 1362 * a short count if a timeout or signal occurs after we start. 
1363 */ 1364 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1365 (size_t)so->so_rcv.ssb_cc < resid) && 1366 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1367 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1368 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1369 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1370 if (so->so_error) { 1371 if (m) 1372 goto dontblock; 1373 error = so->so_error; 1374 if ((flags & MSG_PEEK) == 0) 1375 so->so_error = 0; 1376 goto release; 1377 } 1378 if (so->so_state & SS_CANTRCVMORE) { 1379 if (m) 1380 goto dontblock; 1381 else 1382 goto release; 1383 } 1384 for (; m; m = m->m_next) { 1385 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1386 m = so->so_rcv.ssb_mb; 1387 goto dontblock; 1388 } 1389 } 1390 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1391 (pr->pr_flags & PR_CONNREQUIRED)) { 1392 error = ENOTCONN; 1393 goto release; 1394 } 1395 if (resid == 0) 1396 goto release; 1397 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1398 error = EWOULDBLOCK; 1399 goto release; 1400 } 1401 ssb_unlock(&so->so_rcv); 1402 error = ssb_wait(&so->so_rcv); 1403 if (error) 1404 goto done; 1405 goto restart; 1406 } 1407 dontblock: 1408 if (uio && uio->uio_td && uio->uio_td->td_proc) 1409 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1410 1411 /* 1412 * note: m should be == sb_mb here. Cache the next record while 1413 * cleaning up. Note that calling m_free*() will break out critical 1414 * section. 1415 */ 1416 KKASSERT(m == so->so_rcv.ssb_mb); 1417 1418 /* 1419 * Skip any address mbufs prepending the record. 1420 */ 1421 if (pr->pr_flags & PR_ADDR) { 1422 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1423 orig_resid = 0; 1424 if (psa) 1425 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1426 if (flags & MSG_PEEK) 1427 m = m->m_next; 1428 else 1429 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1430 } 1431 1432 /* 1433 * Skip any control mbufs prepending the record. 1434 */ 1435 while (m && m->m_type == MT_CONTROL && error == 0) { 1436 if (flags & MSG_PEEK) { 1437 if (controlp) 1438 *controlp = m_copy(m, 0, m->m_len); 1439 m = m->m_next; /* XXX race */ 1440 } else { 1441 if (controlp) { 1442 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1443 if (pr->pr_domain->dom_externalize && 1444 mtod(m, struct cmsghdr *)->cmsg_type == 1445 SCM_RIGHTS) 1446 error = (*pr->pr_domain->dom_externalize)(m); 1447 *controlp = m; 1448 m = n; 1449 } else { 1450 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1451 } 1452 } 1453 if (controlp && *controlp) { 1454 orig_resid = 0; 1455 controlp = &(*controlp)->m_next; 1456 } 1457 } 1458 1459 /* 1460 * flag OOB data. 1461 */ 1462 if (m) { 1463 type = m->m_type; 1464 if (type == MT_OOBDATA) 1465 flags |= MSG_OOB; 1466 } 1467 1468 /* 1469 * Copy to the UIO or mbuf return chain (*mp). 1470 */ 1471 moff = 0; 1472 offset = 0; 1473 while (m && resid > 0 && error == 0) { 1474 if (m->m_type == MT_OOBDATA) { 1475 if (type != MT_OOBDATA) 1476 break; 1477 } else if (type == MT_OOBDATA) 1478 break; 1479 else 1480 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1481 ("receive 3")); 1482 soclrstate(so, SS_RCVATMARK); 1483 len = (resid > INT_MAX) ? INT_MAX : resid; 1484 if (so->so_oobmark && len > so->so_oobmark - offset) 1485 len = so->so_oobmark - offset; 1486 if (len > m->m_len - moff) 1487 len = m->m_len - moff; 1488 1489 /* 1490 * Copy out to the UIO or pass the mbufs back to the SIO. 1491 * The SIO is dealt with when we eat the mbuf, but deal 1492 * with the resid here either way. 
1493 */ 1494 if (uio) { 1495 uio->uio_resid = resid; 1496 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1497 resid = uio->uio_resid; 1498 if (error) 1499 goto release; 1500 } else { 1501 resid -= (size_t)len; 1502 } 1503 1504 /* 1505 * Eat the entire mbuf or just a piece of it 1506 */ 1507 if (len == m->m_len - moff) { 1508 if (m->m_flags & M_EOR) 1509 flags |= MSG_EOR; 1510 if (flags & MSG_PEEK) { 1511 m = m->m_next; 1512 moff = 0; 1513 } else { 1514 if (sio) { 1515 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1516 sbappend(sio, m); 1517 m = n; 1518 } else { 1519 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1520 } 1521 } 1522 } else { 1523 if (flags & MSG_PEEK) { 1524 moff += len; 1525 } else { 1526 if (sio) { 1527 n = m_copym(m, 0, len, M_WAITOK); 1528 if (n) 1529 sbappend(sio, n); 1530 } 1531 m->m_data += len; 1532 m->m_len -= len; 1533 so->so_rcv.ssb_cc -= len; 1534 } 1535 } 1536 if (so->so_oobmark) { 1537 if ((flags & MSG_PEEK) == 0) { 1538 so->so_oobmark -= len; 1539 if (so->so_oobmark == 0) { 1540 sosetstate(so, SS_RCVATMARK); 1541 break; 1542 } 1543 } else { 1544 offset += len; 1545 if (offset == so->so_oobmark) 1546 break; 1547 } 1548 } 1549 if (flags & MSG_EOR) 1550 break; 1551 /* 1552 * If the MSG_WAITALL flag is set (for non-atomic socket), 1553 * we must not quit until resid == 0 or an error 1554 * termination. If a signal/timeout occurs, return 1555 * with a short count but without error. 1556 * Keep signalsockbuf locked against other readers. 1557 */ 1558 while ((flags & MSG_WAITALL) && m == NULL && 1559 resid > 0 && !sosendallatonce(so) && 1560 so->so_rcv.ssb_mb == NULL) { 1561 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1562 break; 1563 /* 1564 * The window might have closed to zero, make 1565 * sure we send an ack now that we've drained 1566 * the buffer or we might end up blocking until 1567 * the idle takes over (5 seconds). 1568 */ 1569 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1570 so_pru_rcvd(so, flags); 1571 error = ssb_wait(&so->so_rcv); 1572 if (error) { 1573 ssb_unlock(&so->so_rcv); 1574 error = 0; 1575 goto done; 1576 } 1577 m = so->so_rcv.ssb_mb; 1578 } 1579 } 1580 1581 /* 1582 * If an atomic read was requested but unread data still remains 1583 * in the record, set MSG_TRUNC. 1584 */ 1585 if (m && pr->pr_flags & PR_ATOMIC) 1586 flags |= MSG_TRUNC; 1587 1588 /* 1589 * Cleanup. If an atomic read was requested drop any unread data. 
 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
1696 */ 1697 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1698 (size_t)so->so_rcv.ssb_cc < resid) && 1699 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1700 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1701 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1702 if (so->so_error) { 1703 if (m) 1704 goto dontblock; 1705 lwkt_reltoken(&so->so_rcv.ssb_token); 1706 error = so->so_error; 1707 if ((flags & MSG_PEEK) == 0) 1708 so->so_error = 0; 1709 goto release; 1710 } 1711 if (so->so_state & SS_CANTRCVMORE) { 1712 if (m) 1713 goto dontblock; 1714 lwkt_reltoken(&so->so_rcv.ssb_token); 1715 goto release; 1716 } 1717 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1718 (pr->pr_flags & PR_CONNREQUIRED)) { 1719 lwkt_reltoken(&so->so_rcv.ssb_token); 1720 error = ENOTCONN; 1721 goto release; 1722 } 1723 if (resid == 0) { 1724 lwkt_reltoken(&so->so_rcv.ssb_token); 1725 goto release; 1726 } 1727 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1728 lwkt_reltoken(&so->so_rcv.ssb_token); 1729 error = EWOULDBLOCK; 1730 goto release; 1731 } 1732 ssb_unlock(&so->so_rcv); 1733 error = ssb_wait(&so->so_rcv); 1734 lwkt_reltoken(&so->so_rcv.ssb_token); 1735 if (error) 1736 goto done; 1737 goto restart; 1738 } 1739 1740 /* 1741 * Token still held 1742 */ 1743 dontblock: 1744 n = m; 1745 restmp = 0; 1746 while (n && restmp < resid) { 1747 n->m_flags |= M_SOLOCKED; 1748 restmp += n->m_len; 1749 if (n->m_next == NULL) 1750 n = n->m_nextpkt; 1751 else 1752 n = n->m_next; 1753 } 1754 1755 /* 1756 * Release token for loop 1757 */ 1758 lwkt_reltoken(&so->so_rcv.ssb_token); 1759 if (uio && uio->uio_td && uio->uio_td->td_proc) 1760 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1761 1762 /* 1763 * note: m should be == sb_mb here. Cache the next record while 1764 * cleaning up. Note that calling m_free*() will break out critical 1765 * section. 1766 */ 1767 KKASSERT(m == so->so_rcv.ssb_mb); 1768 1769 /* 1770 * Copy to the UIO or mbuf return chain (*mp). 1771 * 1772 * NOTE: Token is not held for loop 1773 */ 1774 moff = 0; 1775 offset = 0; 1776 didoob = 0; 1777 1778 while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) { 1779 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1780 ("receive 3")); 1781 1782 soclrstate(so, SS_RCVATMARK); 1783 len = (resid > INT_MAX) ? INT_MAX : resid; 1784 if (so->so_oobmark && len > so->so_oobmark - offset) 1785 len = so->so_oobmark - offset; 1786 if (len > m->m_len - moff) 1787 len = m->m_len - moff; 1788 1789 /* 1790 * Copy out to the UIO or pass the mbufs back to the SIO. 1791 * The SIO is dealt with when we eat the mbuf, but deal 1792 * with the resid here either way. 1793 */ 1794 if (uio) { 1795 uio->uio_resid = resid; 1796 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1797 resid = uio->uio_resid; 1798 if (error) 1799 goto release; 1800 } else { 1801 resid -= (size_t)len; 1802 } 1803 1804 /* 1805 * Eat the entire mbuf or just a piece of it 1806 */ 1807 offset += len; 1808 if (len == m->m_len - moff) { 1809 m = m->m_next; 1810 moff = 0; 1811 } else { 1812 moff += len; 1813 } 1814 1815 /* 1816 * Check oobmark 1817 */ 1818 if (so->so_oobmark && offset == so->so_oobmark) { 1819 didoob = 1; 1820 break; 1821 } 1822 } 1823 1824 /* 1825 * Synchronize sockbuf with data we read. 1826 * 1827 * NOTE: (m) is junk on entry (it could be left over from the 1828 * previous loop). 
1829 */ 1830 if ((flags & MSG_PEEK) == 0) { 1831 lwkt_gettoken(&so->so_rcv.ssb_token); 1832 m = so->so_rcv.ssb_mb; 1833 while (m && offset >= m->m_len) { 1834 if (so->so_oobmark) { 1835 so->so_oobmark -= m->m_len; 1836 if (so->so_oobmark == 0) { 1837 sosetstate(so, SS_RCVATMARK); 1838 didoob = 1; 1839 } 1840 } 1841 offset -= m->m_len; 1842 if (sio) { 1843 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1844 sbappend(sio, m); 1845 m = n; 1846 } else { 1847 m = sbunlinkmbuf(&so->so_rcv.sb, 1848 m, &free_chain); 1849 } 1850 } 1851 if (offset) { 1852 KKASSERT(m); 1853 if (sio) { 1854 n = m_copym(m, 0, offset, M_WAITOK); 1855 if (n) 1856 sbappend(sio, n); 1857 } 1858 m->m_data += offset; 1859 m->m_len -= offset; 1860 so->so_rcv.ssb_cc -= offset; 1861 if (so->so_oobmark) { 1862 so->so_oobmark -= offset; 1863 if (so->so_oobmark == 0) { 1864 sosetstate(so, SS_RCVATMARK); 1865 didoob = 1; 1866 } 1867 } 1868 offset = 0; 1869 } 1870 lwkt_reltoken(&so->so_rcv.ssb_token); 1871 } 1872 1873 /* 1874 * If the MSG_WAITALL flag is set (for non-atomic socket), 1875 * we must not quit until resid == 0 or an error termination. 1876 * 1877 * If a signal/timeout occurs, return with a short count but without 1878 * error. 1879 * 1880 * Keep signalsockbuf locked against other readers. 1881 * 1882 * XXX if MSG_PEEK we currently do quit. 1883 */ 1884 if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) && 1885 didoob == 0 && resid > 0 && 1886 !sosendallatonce(so)) { 1887 lwkt_gettoken(&so->so_rcv.ssb_token); 1888 error = 0; 1889 while ((m = so->so_rcv.ssb_mb) == NULL) { 1890 if (so->so_error || (so->so_state & SS_CANTRCVMORE)) { 1891 error = so->so_error; 1892 break; 1893 } 1894 /* 1895 * The window might have closed to zero, make 1896 * sure we send an ack now that we've drained 1897 * the buffer or we might end up blocking until 1898 * the idle takes over (5 seconds). 1899 */ 1900 if (so->so_pcb) 1901 so_pru_rcvd_async(so); 1902 if (so->so_rcv.ssb_mb == NULL) 1903 error = ssb_wait(&so->so_rcv); 1904 if (error) { 1905 lwkt_reltoken(&so->so_rcv.ssb_token); 1906 ssb_unlock(&so->so_rcv); 1907 error = 0; 1908 goto done; 1909 } 1910 } 1911 if (m && error == 0) 1912 goto dontblock; 1913 lwkt_reltoken(&so->so_rcv.ssb_token); 1914 } 1915 1916 /* 1917 * Token not held here. 1918 * 1919 * Cleanup. If an atomic read was requested drop any unread data XXX 1920 */ 1921 if ((flags & MSG_PEEK) == 0) { 1922 if (so->so_pcb) 1923 so_pru_rcvd_async(so); 1924 } 1925 1926 if (orig_resid == resid && orig_resid && 1927 (so->so_state & SS_CANTRCVMORE) == 0) { 1928 ssb_unlock(&so->so_rcv); 1929 goto restart; 1930 } 1931 1932 if (flagsp) 1933 *flagsp |= flags; 1934 release: 1935 ssb_unlock(&so->so_rcv); 1936 done: 1937 if (free_chain) 1938 m_freem(free_chain); 1939 return (error); 1940 } 1941 1942 /* 1943 * Shut a socket down. Note that we do not get a frontend lock as we 1944 * want to be able to shut the socket down even if another thread is 1945 * blocked in a read(), thus waking it up. 
1946 */ 1947 int 1948 soshutdown(struct socket *so, int how) 1949 { 1950 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1951 return (EINVAL); 1952 1953 if (how != SHUT_WR) { 1954 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1955 sorflush(so); 1956 /*ssb_unlock(&so->so_rcv);*/ 1957 } 1958 if (how != SHUT_RD) 1959 return (so_pru_shutdown(so)); 1960 return (0); 1961 } 1962 1963 void 1964 sorflush(struct socket *so) 1965 { 1966 struct signalsockbuf *ssb = &so->so_rcv; 1967 struct protosw *pr = so->so_proto; 1968 struct signalsockbuf asb; 1969 1970 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1971 1972 lwkt_gettoken(&ssb->ssb_token); 1973 socantrcvmore(so); 1974 asb = *ssb; 1975 1976 /* 1977 * Can't just blow up the ssb structure here 1978 */ 1979 bzero(&ssb->sb, sizeof(ssb->sb)); 1980 ssb->ssb_timeo = 0; 1981 ssb->ssb_lowat = 0; 1982 ssb->ssb_hiwat = 0; 1983 ssb->ssb_mbmax = 0; 1984 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1985 1986 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1987 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1988 ssb_release(&asb, so); 1989 1990 lwkt_reltoken(&ssb->ssb_token); 1991 } 1992 1993 #ifdef INET 1994 static int 1995 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1996 { 1997 struct accept_filter_arg *afap = NULL; 1998 struct accept_filter *afp; 1999 struct so_accf *af = so->so_accf; 2000 int error = 0; 2001 2002 /* do not set/remove accept filters on non listen sockets */ 2003 if ((so->so_options & SO_ACCEPTCONN) == 0) { 2004 error = EINVAL; 2005 goto out; 2006 } 2007 2008 /* removing the filter */ 2009 if (sopt == NULL) { 2010 if (af != NULL) { 2011 if (af->so_accept_filter != NULL && 2012 af->so_accept_filter->accf_destroy != NULL) { 2013 af->so_accept_filter->accf_destroy(so); 2014 } 2015 if (af->so_accept_filter_str != NULL) { 2016 kfree(af->so_accept_filter_str, M_ACCF); 2017 } 2018 kfree(af, M_ACCF); 2019 so->so_accf = NULL; 2020 } 2021 so->so_options &= ~SO_ACCEPTFILTER; 2022 return (0); 2023 } 2024 /* adding a filter */ 2025 /* must remove previous filter first */ 2026 if (af != NULL) { 2027 error = EINVAL; 2028 goto out; 2029 } 2030 /* don't put large objects on the kernel stack */ 2031 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 2032 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 2033 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 2034 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 2035 if (error) 2036 goto out; 2037 afp = accept_filt_get(afap->af_name); 2038 if (afp == NULL) { 2039 error = ENOENT; 2040 goto out; 2041 } 2042 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 2043 if (afp->accf_create != NULL) { 2044 if (afap->af_name[0] != '\0') { 2045 int len = strlen(afap->af_name) + 1; 2046 2047 af->so_accept_filter_str = kmalloc(len, M_ACCF, 2048 M_WAITOK); 2049 strcpy(af->so_accept_filter_str, afap->af_name); 2050 } 2051 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 2052 if (af->so_accept_filter_arg == NULL) { 2053 kfree(af->so_accept_filter_str, M_ACCF); 2054 kfree(af, M_ACCF); 2055 so->so_accf = NULL; 2056 error = EINVAL; 2057 goto out; 2058 } 2059 } 2060 af->so_accept_filter = afp; 2061 so->so_accf = af; 2062 so->so_options |= SO_ACCEPTFILTER; 2063 out: 2064 if (afap != NULL) 2065 kfree(afap, M_TEMP); 2066 return (error); 2067 } 2068 #endif /* INET */ 2069 2070 /* 2071 * Perhaps this routine, and sooptcopyout(), below, ought to come in 2072 * an additional variant to handle the case where the option value needs 2073 * to be some kind of integer, but not a specific 
size. 2074 * In addition to their use here, these functions are also called by the 2075 * protocol-level pr_ctloutput() routines. 2076 */ 2077 int 2078 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2079 { 2080 return soopt_to_kbuf(sopt, buf, len, minlen); 2081 } 2082 2083 int 2084 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2085 { 2086 size_t valsize; 2087 2088 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2089 KKASSERT(kva_p(buf)); 2090 2091 /* 2092 * If the user gives us more than we wanted, we ignore it, 2093 * but if we don't get the minimum length the caller 2094 * wants, we return EINVAL. On success, sopt->sopt_valsize 2095 * is set to however much we actually retrieved. 2096 */ 2097 if ((valsize = sopt->sopt_valsize) < minlen) 2098 return EINVAL; 2099 if (valsize > len) 2100 sopt->sopt_valsize = valsize = len; 2101 2102 bcopy(sopt->sopt_val, buf, valsize); 2103 return 0; 2104 } 2105 2106 2107 int 2108 sosetopt(struct socket *so, struct sockopt *sopt) 2109 { 2110 int error, optval; 2111 struct linger l; 2112 struct timeval tv; 2113 u_long val; 2114 struct signalsockbuf *sotmp; 2115 2116 error = 0; 2117 sopt->sopt_dir = SOPT_SET; 2118 if (sopt->sopt_level != SOL_SOCKET) { 2119 if (so->so_proto && so->so_proto->pr_ctloutput) { 2120 return (so_pr_ctloutput(so, sopt)); 2121 } 2122 error = ENOPROTOOPT; 2123 } else { 2124 switch (sopt->sopt_name) { 2125 #ifdef INET 2126 case SO_ACCEPTFILTER: 2127 error = do_setopt_accept_filter(so, sopt); 2128 if (error) 2129 goto bad; 2130 break; 2131 #endif /* INET */ 2132 case SO_LINGER: 2133 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2134 if (error) 2135 goto bad; 2136 2137 so->so_linger = l.l_linger; 2138 if (l.l_onoff) 2139 so->so_options |= SO_LINGER; 2140 else 2141 so->so_options &= ~SO_LINGER; 2142 break; 2143 2144 case SO_DEBUG: 2145 case SO_KEEPALIVE: 2146 case SO_DONTROUTE: 2147 case SO_USELOOPBACK: 2148 case SO_BROADCAST: 2149 case SO_REUSEADDR: 2150 case SO_REUSEPORT: 2151 case SO_OOBINLINE: 2152 case SO_TIMESTAMP: 2153 case SO_NOSIGPIPE: 2154 error = sooptcopyin(sopt, &optval, sizeof optval, 2155 sizeof optval); 2156 if (error) 2157 goto bad; 2158 if (optval) 2159 so->so_options |= sopt->sopt_name; 2160 else 2161 so->so_options &= ~sopt->sopt_name; 2162 break; 2163 2164 case SO_SNDBUF: 2165 case SO_RCVBUF: 2166 case SO_SNDLOWAT: 2167 case SO_RCVLOWAT: 2168 error = sooptcopyin(sopt, &optval, sizeof optval, 2169 sizeof optval); 2170 if (error) 2171 goto bad; 2172 2173 /* 2174 * Values < 1 make no sense for any of these 2175 * options, so disallow them. 2176 */ 2177 if (optval < 1) { 2178 error = EINVAL; 2179 goto bad; 2180 } 2181 2182 switch (sopt->sopt_name) { 2183 case SO_SNDBUF: 2184 case SO_RCVBUF: 2185 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ? 2186 &so->so_snd : &so->so_rcv, (u_long)optval, 2187 so, 2188 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) { 2189 error = ENOBUFS; 2190 goto bad; 2191 } 2192 sotmp = (sopt->sopt_name == SO_SNDBUF) ? 2193 &so->so_snd : &so->so_rcv; 2194 atomic_clear_int(&sotmp->ssb_flags, 2195 SSB_AUTOSIZE); 2196 break; 2197 2198 /* 2199 * Make sure the low-water is never greater than 2200 * the high-water. 2201 */ 2202 case SO_SNDLOWAT: 2203 so->so_snd.ssb_lowat = 2204 (optval > so->so_snd.ssb_hiwat) ? 2205 so->so_snd.ssb_hiwat : optval; 2206 atomic_clear_int(&so->so_snd.ssb_flags, 2207 SSB_AUTOLOWAT); 2208 break; 2209 case SO_RCVLOWAT: 2210 so->so_rcv.ssb_lowat = 2211 (optval > so->so_rcv.ssb_hiwat) ? 
2212 so->so_rcv.ssb_hiwat : optval; 2213 atomic_clear_int(&so->so_rcv.ssb_flags, 2214 SSB_AUTOLOWAT); 2215 break; 2216 } 2217 break; 2218 2219 case SO_SNDTIMEO: 2220 case SO_RCVTIMEO: 2221 error = sooptcopyin(sopt, &tv, sizeof tv, 2222 sizeof tv); 2223 if (error) 2224 goto bad; 2225 2226 /* assert(hz > 0); */ 2227 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2228 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2229 error = EDOM; 2230 goto bad; 2231 } 2232 /* assert(tick > 0); */ 2233 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2234 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick; 2235 if (val > INT_MAX) { 2236 error = EDOM; 2237 goto bad; 2238 } 2239 if (val == 0 && tv.tv_usec != 0) 2240 val = 1; 2241 2242 switch (sopt->sopt_name) { 2243 case SO_SNDTIMEO: 2244 so->so_snd.ssb_timeo = val; 2245 break; 2246 case SO_RCVTIMEO: 2247 so->so_rcv.ssb_timeo = val; 2248 break; 2249 } 2250 break; 2251 default: 2252 error = ENOPROTOOPT; 2253 break; 2254 } 2255 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { 2256 (void) so_pr_ctloutput(so, sopt); 2257 } 2258 } 2259 bad: 2260 return (error); 2261 } 2262 2263 /* Helper routine for getsockopt */ 2264 int 2265 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2266 { 2267 soopt_from_kbuf(sopt, buf, len); 2268 return 0; 2269 } 2270 2271 void 2272 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len) 2273 { 2274 size_t valsize; 2275 2276 if (len == 0) { 2277 sopt->sopt_valsize = 0; 2278 return; 2279 } 2280 2281 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2282 KKASSERT(kva_p(buf)); 2283 2284 /* 2285 * Documented get behavior is that we always return a value, 2286 * possibly truncated to fit in the user's buffer. 2287 * Traditional behavior is that we always tell the user 2288 * precisely how much we copied, rather than something useful 2289 * like the total amount we had available for her. 2290 * Note that this interface is not idempotent; the entire answer must 2291 * generated ahead of time. 
2292 */ 2293 valsize = szmin(len, sopt->sopt_valsize); 2294 sopt->sopt_valsize = valsize; 2295 if (sopt->sopt_val != 0) { 2296 bcopy(buf, sopt->sopt_val, valsize); 2297 } 2298 } 2299 2300 int 2301 sogetopt(struct socket *so, struct sockopt *sopt) 2302 { 2303 int error, optval; 2304 long optval_l; 2305 struct linger l; 2306 struct timeval tv; 2307 #ifdef INET 2308 struct accept_filter_arg *afap; 2309 #endif 2310 2311 error = 0; 2312 sopt->sopt_dir = SOPT_GET; 2313 if (sopt->sopt_level != SOL_SOCKET) { 2314 if (so->so_proto && so->so_proto->pr_ctloutput) { 2315 return (so_pr_ctloutput(so, sopt)); 2316 } else 2317 return (ENOPROTOOPT); 2318 } else { 2319 switch (sopt->sopt_name) { 2320 #ifdef INET 2321 case SO_ACCEPTFILTER: 2322 if ((so->so_options & SO_ACCEPTCONN) == 0) 2323 return (EINVAL); 2324 afap = kmalloc(sizeof(*afap), M_TEMP, 2325 M_WAITOK | M_ZERO); 2326 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 2327 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 2328 if (so->so_accf->so_accept_filter_str != NULL) 2329 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 2330 } 2331 error = sooptcopyout(sopt, afap, sizeof(*afap)); 2332 kfree(afap, M_TEMP); 2333 break; 2334 #endif /* INET */ 2335 2336 case SO_LINGER: 2337 l.l_onoff = so->so_options & SO_LINGER; 2338 l.l_linger = so->so_linger; 2339 error = sooptcopyout(sopt, &l, sizeof l); 2340 break; 2341 2342 case SO_USELOOPBACK: 2343 case SO_DONTROUTE: 2344 case SO_DEBUG: 2345 case SO_KEEPALIVE: 2346 case SO_REUSEADDR: 2347 case SO_REUSEPORT: 2348 case SO_BROADCAST: 2349 case SO_OOBINLINE: 2350 case SO_TIMESTAMP: 2351 case SO_NOSIGPIPE: 2352 optval = so->so_options & sopt->sopt_name; 2353 integer: 2354 error = sooptcopyout(sopt, &optval, sizeof optval); 2355 break; 2356 2357 case SO_TYPE: 2358 optval = so->so_type; 2359 goto integer; 2360 2361 case SO_ERROR: 2362 optval = so->so_error; 2363 so->so_error = 0; 2364 goto integer; 2365 2366 case SO_SNDBUF: 2367 optval = so->so_snd.ssb_hiwat; 2368 goto integer; 2369 2370 case SO_RCVBUF: 2371 optval = so->so_rcv.ssb_hiwat; 2372 goto integer; 2373 2374 case SO_SNDLOWAT: 2375 optval = so->so_snd.ssb_lowat; 2376 goto integer; 2377 2378 case SO_RCVLOWAT: 2379 optval = so->so_rcv.ssb_lowat; 2380 goto integer; 2381 2382 case SO_SNDTIMEO: 2383 case SO_RCVTIMEO: 2384 optval = (sopt->sopt_name == SO_SNDTIMEO ? 2385 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo); 2386 2387 tv.tv_sec = optval / hz; 2388 tv.tv_usec = (optval % hz) * ustick; 2389 error = sooptcopyout(sopt, &tv, sizeof tv); 2390 break; 2391 2392 case SO_SNDSPACE: 2393 optval_l = ssb_space(&so->so_snd); 2394 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l)); 2395 break; 2396 2397 case SO_CPUHINT: 2398 optval = -1; /* no hint */ 2399 goto integer; 2400 2401 default: 2402 error = ENOPROTOOPT; 2403 break; 2404 } 2405 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) 2406 so_pr_ctloutput(so, sopt); 2407 return (error); 2408 } 2409 } 2410 2411 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2412 int 2413 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2414 { 2415 struct mbuf *m, *m_prev; 2416 int sopt_size = sopt->sopt_valsize, msize; 2417 2418 m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA, 2419 0, &msize); 2420 if (m == NULL) 2421 return (ENOBUFS); 2422 m->m_len = min(msize, sopt_size); 2423 sopt_size -= m->m_len; 2424 *mp = m; 2425 m_prev = m; 2426 2427 while (sopt_size > 0) { 2428 m = m_getl(sopt_size, sopt->sopt_td ? 
M_WAITOK : M_NOWAIT, 2429 MT_DATA, 0, &msize); 2430 if (m == NULL) { 2431 m_freem(*mp); 2432 return (ENOBUFS); 2433 } 2434 m->m_len = min(msize, sopt_size); 2435 sopt_size -= m->m_len; 2436 m_prev->m_next = m; 2437 m_prev = m; 2438 } 2439 return (0); 2440 } 2441 2442 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */ 2443 int 2444 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2445 { 2446 soopt_to_mbuf(sopt, m); 2447 return 0; 2448 } 2449 2450 void 2451 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m) 2452 { 2453 size_t valsize; 2454 void *val; 2455 2456 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2457 KKASSERT(kva_p(m)); 2458 if (sopt->sopt_val == NULL) 2459 return; 2460 val = sopt->sopt_val; 2461 valsize = sopt->sopt_valsize; 2462 while (m != NULL && valsize >= m->m_len) { 2463 bcopy(val, mtod(m, char *), m->m_len); 2464 valsize -= m->m_len; 2465 val = (caddr_t)val + m->m_len; 2466 m = m->m_next; 2467 } 2468 if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ 2469 panic("ip6_sooptmcopyin"); 2470 } 2471 2472 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2473 int 2474 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2475 { 2476 return soopt_from_mbuf(sopt, m); 2477 } 2478 2479 int 2480 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m) 2481 { 2482 struct mbuf *m0 = m; 2483 size_t valsize = 0; 2484 size_t maxsize; 2485 void *val; 2486 2487 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2488 KKASSERT(kva_p(m)); 2489 if (sopt->sopt_val == NULL) 2490 return 0; 2491 val = sopt->sopt_val; 2492 maxsize = sopt->sopt_valsize; 2493 while (m != NULL && maxsize >= m->m_len) { 2494 bcopy(mtod(m, char *), val, m->m_len); 2495 maxsize -= m->m_len; 2496 val = (caddr_t)val + m->m_len; 2497 valsize += m->m_len; 2498 m = m->m_next; 2499 } 2500 if (m != NULL) { 2501 /* enough soopt buffer should be given from user-land */ 2502 m_freem(m0); 2503 return (EINVAL); 2504 } 2505 sopt->sopt_valsize = valsize; 2506 return 0; 2507 } 2508 2509 void 2510 sohasoutofband(struct socket *so) 2511 { 2512 if (so->so_sigio != NULL) 2513 pgsigio(so->so_sigio, SIGURG, 0); 2514 KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB); 2515 } 2516 2517 int 2518 sokqfilter(struct file *fp, struct knote *kn) 2519 { 2520 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2521 struct signalsockbuf *ssb; 2522 2523 switch (kn->kn_filter) { 2524 case EVFILT_READ: 2525 if (so->so_options & SO_ACCEPTCONN) 2526 kn->kn_fop = &solisten_filtops; 2527 else 2528 kn->kn_fop = &soread_filtops; 2529 ssb = &so->so_rcv; 2530 break; 2531 case EVFILT_WRITE: 2532 kn->kn_fop = &sowrite_filtops; 2533 ssb = &so->so_snd; 2534 break; 2535 case EVFILT_EXCEPT: 2536 kn->kn_fop = &soexcept_filtops; 2537 ssb = &so->so_rcv; 2538 break; 2539 default: 2540 return (EOPNOTSUPP); 2541 } 2542 2543 knote_insert(&ssb->ssb_kq.ki_note, kn); 2544 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE); 2545 return (0); 2546 } 2547 2548 static void 2549 filt_sordetach(struct knote *kn) 2550 { 2551 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2552 2553 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn); 2554 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note)) 2555 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE); 2556 } 2557 2558 /*ARGSUSED*/ 2559 static int 2560 filt_soread(struct knote *kn, long hint) 2561 { 2562 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2563 2564 if (kn->kn_sfflags & NOTE_OOB) { 2565 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) { 2566 kn->kn_fflags |= 
NOTE_OOB; 2567 return (1); 2568 } 2569 return (0); 2570 } 2571 kn->kn_data = so->so_rcv.ssb_cc; 2572 2573 if (so->so_state & SS_CANTRCVMORE) { 2574 /* 2575 * Only set NODATA if all data has been exhausted. 2576 */ 2577 if (kn->kn_data == 0) 2578 kn->kn_flags |= EV_NODATA; 2579 kn->kn_flags |= EV_EOF; 2580 kn->kn_fflags = so->so_error; 2581 return (1); 2582 } 2583 if (so->so_error) /* temporary udp error */ 2584 return (1); 2585 if (kn->kn_sfflags & NOTE_LOWAT) 2586 return (kn->kn_data >= kn->kn_sdata); 2587 return ((kn->kn_data >= so->so_rcv.ssb_lowat) || 2588 !TAILQ_EMPTY(&so->so_comp)); 2589 } 2590 2591 static void 2592 filt_sowdetach(struct knote *kn) 2593 { 2594 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2595 2596 knote_remove(&so->so_snd.ssb_kq.ki_note, kn); 2597 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note)) 2598 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE); 2599 } 2600 2601 /*ARGSUSED*/ 2602 static int 2603 filt_sowrite(struct knote *kn, long hint) 2604 { 2605 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2606 2607 kn->kn_data = ssb_space(&so->so_snd); 2608 if (so->so_state & SS_CANTSENDMORE) { 2609 kn->kn_flags |= (EV_EOF | EV_NODATA); 2610 kn->kn_fflags = so->so_error; 2611 return (1); 2612 } 2613 if (so->so_error) /* temporary udp error */ 2614 return (1); 2615 if (((so->so_state & SS_ISCONNECTED) == 0) && 2616 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2617 return (0); 2618 if (kn->kn_sfflags & NOTE_LOWAT) 2619 return (kn->kn_data >= kn->kn_sdata); 2620 return (kn->kn_data >= so->so_snd.ssb_lowat); 2621 } 2622 2623 /*ARGSUSED*/ 2624 static int 2625 filt_solisten(struct knote *kn, long hint) 2626 { 2627 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2628 2629 kn->kn_data = so->so_qlen; 2630 return (! TAILQ_EMPTY(&so->so_comp)); 2631 } 2632