1 /* $NetBSD: uipc_socket.c,v 1.203 2011/02/01 01:39:20 matt Exp $ */ 2 3 /*- 4 * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 2004 The FreeBSD Foundation 34 * Copyright (c) 2004 Robert Watson 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 
61 * 62 * @(#)uipc_socket.c 8.6 (Berkeley) 5/2/95 63 */ 64 65 #include <sys/cdefs.h> 66 __KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.203 2011/02/01 01:39:20 matt Exp $"); 67 68 #include "opt_compat_netbsd.h" 69 #include "opt_sock_counters.h" 70 #include "opt_sosend_loan.h" 71 #include "opt_mbuftrace.h" 72 #include "opt_somaxkva.h" 73 #include "opt_multiprocessor.h" /* XXX */ 74 75 #include <sys/param.h> 76 #include <sys/systm.h> 77 #include <sys/proc.h> 78 #include <sys/file.h> 79 #include <sys/filedesc.h> 80 #include <sys/kmem.h> 81 #include <sys/mbuf.h> 82 #include <sys/domain.h> 83 #include <sys/kernel.h> 84 #include <sys/protosw.h> 85 #include <sys/socket.h> 86 #include <sys/socketvar.h> 87 #include <sys/signalvar.h> 88 #include <sys/resourcevar.h> 89 #include <sys/uidinfo.h> 90 #include <sys/event.h> 91 #include <sys/poll.h> 92 #include <sys/kauth.h> 93 #include <sys/mutex.h> 94 #include <sys/condvar.h> 95 96 #ifdef COMPAT_50 97 #include <compat/sys/time.h> 98 #include <compat/sys/socket.h> 99 #endif 100 101 #include <uvm/uvm_extern.h> 102 #include <uvm/uvm_loan.h> 103 #include <uvm/uvm_page.h> 104 105 MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); 106 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 107 108 extern const struct fileops socketops; 109 110 extern int somaxconn; /* patchable (XXX sysctl) */ 111 int somaxconn = SOMAXCONN; 112 kmutex_t *softnet_lock; 113 114 #ifdef SOSEND_COUNTERS 115 #include <sys/device.h> 116 117 static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 118 NULL, "sosend", "loan big"); 119 static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 120 NULL, "sosend", "copy big"); 121 static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 122 NULL, "sosend", "copy small"); 123 static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, 124 NULL, "sosend", "kva limit"); 125 126 #define SOSEND_COUNTER_INCR(ev) (ev)->ev_count++ 127 128 EVCNT_ATTACH_STATIC(sosend_loan_big); 129 EVCNT_ATTACH_STATIC(sosend_copy_big); 130 EVCNT_ATTACH_STATIC(sosend_copy_small); 131 EVCNT_ATTACH_STATIC(sosend_kvalimit); 132 #else 133 134 #define SOSEND_COUNTER_INCR(ev) /* nothing */ 135 136 #endif /* SOSEND_COUNTERS */ 137 138 static struct callback_entry sokva_reclaimerentry; 139 140 #if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR) 141 int sock_loan_thresh = -1; 142 #else 143 int sock_loan_thresh = 4096; 144 #endif 145 146 static kmutex_t so_pendfree_lock; 147 static struct mbuf *so_pendfree; 148 149 #ifndef SOMAXKVA 150 #define SOMAXKVA (16 * 1024 * 1024) 151 #endif 152 int somaxkva = SOMAXKVA; 153 static int socurkva; 154 static kcondvar_t socurkva_cv; 155 156 static kauth_listener_t socket_listener; 157 158 #define SOCK_LOAN_CHUNK 65536 159 160 static size_t sodopendfree(void); 161 static size_t sodopendfreel(void); 162 163 static void sysctl_kern_somaxkva_setup(void); 164 static struct sysctllog *socket_sysctllog; 165 166 static vsize_t 167 sokvareserve(struct socket *so, vsize_t len) 168 { 169 int error; 170 171 mutex_enter(&so_pendfree_lock); 172 while (socurkva + len > somaxkva) { 173 size_t freed; 174 175 /* 176 * try to do pendfree. 177 */ 178 179 freed = sodopendfreel(); 180 181 /* 182 * if some kva was freed, try again. 
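 * otherwise sleep on socurkva_cv below; sokvaunreserve() and
 * soloanfree() broadcast it when kva or pending mbufs are returned.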
183 */ 184 185 if (freed) 186 continue; 187 188 SOSEND_COUNTER_INCR(&sosend_kvalimit); 189 error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock); 190 if (error) { 191 len = 0; 192 break; 193 } 194 } 195 socurkva += len; 196 mutex_exit(&so_pendfree_lock); 197 return len; 198 } 199 200 static void 201 sokvaunreserve(vsize_t len) 202 { 203 204 mutex_enter(&so_pendfree_lock); 205 socurkva -= len; 206 cv_broadcast(&socurkva_cv); 207 mutex_exit(&so_pendfree_lock); 208 } 209 210 /* 211 * sokvaalloc: allocate kva for loan. 212 */ 213 214 vaddr_t 215 sokvaalloc(vsize_t len, struct socket *so) 216 { 217 vaddr_t lva; 218 219 /* 220 * reserve kva. 221 */ 222 223 if (sokvareserve(so, len) == 0) 224 return 0; 225 226 /* 227 * allocate kva. 228 */ 229 230 lva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA); 231 if (lva == 0) { 232 sokvaunreserve(len); 233 return (0); 234 } 235 236 return lva; 237 } 238 239 /* 240 * sokvafree: free kva for loan. 241 */ 242 243 void 244 sokvafree(vaddr_t sva, vsize_t len) 245 { 246 247 /* 248 * free kva. 249 */ 250 251 uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY); 252 253 /* 254 * unreserve kva. 255 */ 256 257 sokvaunreserve(len); 258 } 259 260 static void 261 sodoloanfree(struct vm_page **pgs, void *buf, size_t size) 262 { 263 vaddr_t sva, eva; 264 vsize_t len; 265 int npgs; 266 267 KASSERT(pgs != NULL); 268 269 eva = round_page((vaddr_t) buf + size); 270 sva = trunc_page((vaddr_t) buf); 271 len = eva - sva; 272 npgs = len >> PAGE_SHIFT; 273 274 pmap_kremove(sva, len); 275 pmap_update(pmap_kernel()); 276 uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE); 277 sokvafree(sva, len); 278 } 279 280 static size_t 281 sodopendfree(void) 282 { 283 size_t rv; 284 285 if (__predict_true(so_pendfree == NULL)) 286 return 0; 287 288 mutex_enter(&so_pendfree_lock); 289 rv = sodopendfreel(); 290 mutex_exit(&so_pendfree_lock); 291 292 return rv; 293 } 294 295 /* 296 * sodopendfreel: free mbufs on "pendfree" list. 297 * unlock and relock so_pendfree_lock when freeing mbufs. 298 * 299 * => called with so_pendfree_lock held. 300 */ 301 302 static size_t 303 sodopendfreel(void) 304 { 305 struct mbuf *m, *next; 306 size_t rv = 0; 307 308 KASSERT(mutex_owned(&so_pendfree_lock)); 309 310 while (so_pendfree != NULL) { 311 m = so_pendfree; 312 so_pendfree = NULL; 313 mutex_exit(&so_pendfree_lock); 314 315 for (; m != NULL; m = next) { 316 next = m->m_next; 317 KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0); 318 KASSERT(m->m_ext.ext_refcnt == 0); 319 320 rv += m->m_ext.ext_size; 321 sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf, 322 m->m_ext.ext_size); 323 pool_cache_put(mb_cache, m); 324 } 325 326 mutex_enter(&so_pendfree_lock); 327 } 328 329 return (rv); 330 } 331 332 void 333 soloanfree(struct mbuf *m, void *buf, size_t size, void *arg) 334 { 335 336 KASSERT(m != NULL); 337 338 /* 339 * postpone freeing mbuf. 340 * 341 * we can't do it in interrupt context 342 * because we need to put kva back to kernel_map. 
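 * instead, queue the mbuf on so_pendfree and wake any senders waiting
 * for kva; the list is drained later from thread context, e.g. by
 * sodopendfree() above, which is roughly:
 *
 *	mutex_enter(&so_pendfree_lock);
 *	(void)sodopendfreel();
 *	mutex_exit(&so_pendfree_lock);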
343 */ 344 345 mutex_enter(&so_pendfree_lock); 346 m->m_next = so_pendfree; 347 so_pendfree = m; 348 cv_broadcast(&socurkva_cv); 349 mutex_exit(&so_pendfree_lock); 350 } 351 352 static long 353 sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space) 354 { 355 struct iovec *iov = uio->uio_iov; 356 vaddr_t sva, eva; 357 vsize_t len; 358 vaddr_t lva; 359 int npgs, error; 360 vaddr_t va; 361 int i; 362 363 if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) 364 return (0); 365 366 if (iov->iov_len < (size_t) space) 367 space = iov->iov_len; 368 if (space > SOCK_LOAN_CHUNK) 369 space = SOCK_LOAN_CHUNK; 370 371 eva = round_page((vaddr_t) iov->iov_base + space); 372 sva = trunc_page((vaddr_t) iov->iov_base); 373 len = eva - sva; 374 npgs = len >> PAGE_SHIFT; 375 376 KASSERT(npgs <= M_EXT_MAXPAGES); 377 378 lva = sokvaalloc(len, so); 379 if (lva == 0) 380 return 0; 381 382 error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len, 383 m->m_ext.ext_pgs, UVM_LOAN_TOPAGE); 384 if (error) { 385 sokvafree(lva, len); 386 return (0); 387 } 388 389 for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE) 390 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]), 391 VM_PROT_READ, 0); 392 pmap_update(pmap_kernel()); 393 394 lva += (vaddr_t) iov->iov_base & PAGE_MASK; 395 396 MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so); 397 m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP; 398 399 uio->uio_resid -= space; 400 /* uio_offset not updated, not set/used for write(2) */ 401 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space; 402 uio->uio_iov->iov_len -= space; 403 if (uio->uio_iov->iov_len == 0) { 404 uio->uio_iov++; 405 uio->uio_iovcnt--; 406 } 407 408 return (space); 409 } 410 411 static int 412 sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) 413 { 414 415 KASSERT(ce == &sokva_reclaimerentry); 416 KASSERT(obj == NULL); 417 418 sodopendfree(); 419 if (!vm_map_starved_p(kernel_map)) { 420 return CALLBACK_CHAIN_ABORT; 421 } 422 return CALLBACK_CHAIN_CONTINUE; 423 } 424 425 struct mbuf * 426 getsombuf(struct socket *so, int type) 427 { 428 struct mbuf *m; 429 430 m = m_get(M_WAIT, type); 431 MCLAIM(m, so->so_mowner); 432 return m; 433 } 434 435 static int 436 socket_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, 437 void *arg0, void *arg1, void *arg2, void *arg3) 438 { 439 int result; 440 enum kauth_network_req req; 441 442 result = KAUTH_RESULT_DEFER; 443 req = (enum kauth_network_req)arg0; 444 445 if ((action != KAUTH_NETWORK_SOCKET) && 446 (action != KAUTH_NETWORK_BIND)) 447 return result; 448 449 switch (req) { 450 case KAUTH_REQ_NETWORK_BIND_PORT: 451 result = KAUTH_RESULT_ALLOW; 452 break; 453 454 case KAUTH_REQ_NETWORK_SOCKET_DROP: { 455 /* Normal users can only drop their own connections. */ 456 struct socket *so = (struct socket *)arg1; 457 458 if (proc_uidmatch(cred, so->so_cred)) 459 result = KAUTH_RESULT_ALLOW; 460 461 break; 462 } 463 464 case KAUTH_REQ_NETWORK_SOCKET_OPEN: 465 /* We allow "raw" routing/bluetooth sockets to anyone. */ 466 if ((u_long)arg1 == PF_ROUTE || (u_long)arg1 == PF_OROUTE 467 || (u_long)arg1 == PF_BLUETOOTH) { 468 result = KAUTH_RESULT_ALLOW; 469 } else { 470 /* Privileged, let secmodel handle this. 
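 * Leaving the result as KAUTH_RESULT_DEFER for raw sockets in other
 * domains lets the active secmodel decide whether to allow them.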
*/ 471 if ((u_long)arg2 == SOCK_RAW) 472 break; 473 } 474 475 result = KAUTH_RESULT_ALLOW; 476 477 break; 478 479 case KAUTH_REQ_NETWORK_SOCKET_CANSEE: 480 result = KAUTH_RESULT_ALLOW; 481 482 break; 483 484 default: 485 break; 486 } 487 488 return result; 489 } 490 491 void 492 soinit(void) 493 { 494 495 sysctl_kern_somaxkva_setup(); 496 497 mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM); 498 softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 499 cv_init(&socurkva_cv, "sokva"); 500 soinit2(); 501 502 /* Set the initial adjusted socket buffer size. */ 503 if (sb_max_set(sb_max)) 504 panic("bad initial sb_max value: %lu", sb_max); 505 506 callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback, 507 &sokva_reclaimerentry, NULL, sokva_reclaim_callback); 508 509 socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK, 510 socket_listener_cb, NULL); 511 } 512 513 /* 514 * Socket operation routines. 515 * These routines are called by the routines in 516 * sys_socket.c or from a system process, and 517 * implement the semantics of socket operations by 518 * switching out to the protocol specific routines. 519 */ 520 /*ARGSUSED*/ 521 int 522 socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l, 523 struct socket *lockso) 524 { 525 const struct protosw *prp; 526 struct socket *so; 527 uid_t uid; 528 int error; 529 kmutex_t *lock; 530 531 error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET, 532 KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type), 533 KAUTH_ARG(proto)); 534 if (error != 0) 535 return error; 536 537 if (proto) 538 prp = pffindproto(dom, proto, type); 539 else 540 prp = pffindtype(dom, type); 541 if (prp == NULL) { 542 /* no support for domain */ 543 if (pffinddomain(dom) == 0) 544 return EAFNOSUPPORT; 545 /* no support for socket type */ 546 if (proto == 0 && type != 0) 547 return EPROTOTYPE; 548 return EPROTONOSUPPORT; 549 } 550 if (prp->pr_usrreq == NULL) 551 return EPROTONOSUPPORT; 552 if (prp->pr_type != type) 553 return EPROTOTYPE; 554 555 so = soget(true); 556 so->so_type = type; 557 so->so_proto = prp; 558 so->so_send = sosend; 559 so->so_receive = soreceive; 560 #ifdef MBUFTRACE 561 so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner; 562 so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner; 563 so->so_mowner = &prp->pr_domain->dom_mowner; 564 #endif 565 uid = kauth_cred_geteuid(l->l_cred); 566 so->so_uidinfo = uid_find(uid); 567 so->so_cpid = l->l_proc->p_pid; 568 if (lockso != NULL) { 569 /* Caller wants us to share a lock. */ 570 lock = lockso->so_lock; 571 so->so_lock = lock; 572 mutex_obj_hold(lock); 573 mutex_enter(lock); 574 } else { 575 /* Lock assigned and taken during PRU_ATTACH. */ 576 } 577 error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL, 578 (struct mbuf *)(long)proto, NULL, l); 579 KASSERT(solocked(so)); 580 if (error != 0) { 581 so->so_state |= SS_NOFDREF; 582 sofree(so); 583 return error; 584 } 585 so->so_cred = kauth_cred_dup(l->l_cred); 586 sounlock(so); 587 *aso = so; 588 return 0; 589 } 590 591 /* On success, write file descriptor to fdout and return zero. On 592 * failure, return non-zero; *fdout will be undefined. 
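 *
 * Illustrative caller sketch (hypothetical, not from this file):
 *
 *	struct socket *so;
 *	int fd, error;
 *
 *	error = fsocreate(AF_INET, &so, SOCK_DGRAM, 0, curlwp, &fd);
 *	if (error != 0)
 *		return error;
 *	... fd now references the newly attached socket ...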
593 */ 594 int 595 fsocreate(int domain, struct socket **sop, int type, int protocol, 596 struct lwp *l, int *fdout) 597 { 598 struct socket *so; 599 struct file *fp; 600 int fd, error; 601 602 if ((error = fd_allocfile(&fp, &fd)) != 0) 603 return (error); 604 fp->f_flag = FREAD|FWRITE; 605 fp->f_type = DTYPE_SOCKET; 606 fp->f_ops = &socketops; 607 error = socreate(domain, &so, type, protocol, l, NULL); 608 if (error != 0) { 609 fd_abort(curproc, fp, fd); 610 } else { 611 if (sop != NULL) 612 *sop = so; 613 fp->f_data = so; 614 fd_affix(curproc, fp, fd); 615 *fdout = fd; 616 } 617 return error; 618 } 619 620 int 621 sofamily(const struct socket *so) 622 { 623 const struct protosw *pr; 624 const struct domain *dom; 625 626 if ((pr = so->so_proto) == NULL) 627 return AF_UNSPEC; 628 if ((dom = pr->pr_domain) == NULL) 629 return AF_UNSPEC; 630 return dom->dom_family; 631 } 632 633 int 634 sobind(struct socket *so, struct mbuf *nam, struct lwp *l) 635 { 636 int error; 637 638 solock(so); 639 error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, l); 640 sounlock(so); 641 return error; 642 } 643 644 int 645 solisten(struct socket *so, int backlog, struct lwp *l) 646 { 647 int error; 648 649 solock(so); 650 if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | 651 SS_ISDISCONNECTING)) != 0) { 652 sounlock(so); 653 return (EOPNOTSUPP); 654 } 655 error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, 656 NULL, NULL, l); 657 if (error != 0) { 658 sounlock(so); 659 return error; 660 } 661 if (TAILQ_EMPTY(&so->so_q)) 662 so->so_options |= SO_ACCEPTCONN; 663 if (backlog < 0) 664 backlog = 0; 665 so->so_qlimit = min(backlog, somaxconn); 666 sounlock(so); 667 return 0; 668 } 669 670 void 671 sofree(struct socket *so) 672 { 673 u_int refs; 674 675 KASSERT(solocked(so)); 676 677 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) { 678 sounlock(so); 679 return; 680 } 681 if (so->so_head) { 682 /* 683 * We must not decommission a socket that's on the accept(2) 684 * queue. If we do, then accept(2) may hang after select(2) 685 * indicated that the listening socket was ready. 686 */ 687 if (!soqremque(so, 0)) { 688 sounlock(so); 689 return; 690 } 691 } 692 if (so->so_rcv.sb_hiwat) 693 (void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0, 694 RLIM_INFINITY); 695 if (so->so_snd.sb_hiwat) 696 (void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0, 697 RLIM_INFINITY); 698 sbrelease(&so->so_snd, so); 699 KASSERT(!cv_has_waiters(&so->so_cv)); 700 KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv)); 701 KASSERT(!cv_has_waiters(&so->so_snd.sb_cv)); 702 sorflush(so); 703 refs = so->so_aborting; /* XXX */ 704 /* Remove acccept filter if one is present. */ 705 if (so->so_accf != NULL) 706 (void)accept_filt_clear(so); 707 sounlock(so); 708 if (refs == 0) /* XXX */ 709 soput(so); 710 } 711 712 /* 713 * Close a socket on last file table reference removal. 714 * Initiate disconnect if connected. 715 * Free socket when disconnect complete. 716 */ 717 int 718 soclose(struct socket *so) 719 { 720 struct socket *so2; 721 int error; 722 int error2; 723 724 error = 0; 725 solock(so); 726 if (so->so_options & SO_ACCEPTCONN) { 727 for (;;) { 728 if ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) { 729 KASSERT(solocked2(so, so2)); 730 (void) soqremque(so2, 0); 731 /* soabort drops the lock. */ 732 (void) soabort(so2); 733 solock(so); 734 continue; 735 } 736 if ((so2 = TAILQ_FIRST(&so->so_q)) != 0) { 737 KASSERT(solocked2(so, so2)); 738 (void) soqremque(so2, 1); 739 /* soabort drops the lock. 
*/ 740 (void) soabort(so2); 741 solock(so); 742 continue; 743 } 744 break; 745 } 746 } 747 if (so->so_pcb == 0) 748 goto discard; 749 if (so->so_state & SS_ISCONNECTED) { 750 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 751 error = sodisconnect(so); 752 if (error) 753 goto drop; 754 } 755 if (so->so_options & SO_LINGER) { 756 if ((so->so_state & SS_ISDISCONNECTING) && so->so_nbio) 757 goto drop; 758 while (so->so_state & SS_ISCONNECTED) { 759 error = sowait(so, true, so->so_linger * hz); 760 if (error) 761 break; 762 } 763 } 764 } 765 drop: 766 if (so->so_pcb) { 767 error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, 768 NULL, NULL, NULL, NULL); 769 if (error == 0) 770 error = error2; 771 } 772 discard: 773 if (so->so_state & SS_NOFDREF) 774 panic("soclose: NOFDREF"); 775 kauth_cred_free(so->so_cred); 776 so->so_state |= SS_NOFDREF; 777 sofree(so); 778 return (error); 779 } 780 781 /* 782 * Must be called with the socket locked.. Will return with it unlocked. 783 */ 784 int 785 soabort(struct socket *so) 786 { 787 u_int refs; 788 int error; 789 790 KASSERT(solocked(so)); 791 KASSERT(so->so_head == NULL); 792 793 so->so_aborting++; /* XXX */ 794 error = (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, 795 NULL, NULL, NULL); 796 refs = --so->so_aborting; /* XXX */ 797 if (error || (refs == 0)) { 798 sofree(so); 799 } else { 800 sounlock(so); 801 } 802 return error; 803 } 804 805 int 806 soaccept(struct socket *so, struct mbuf *nam) 807 { 808 int error; 809 810 KASSERT(solocked(so)); 811 812 error = 0; 813 if ((so->so_state & SS_NOFDREF) == 0) 814 panic("soaccept: !NOFDREF"); 815 so->so_state &= ~SS_NOFDREF; 816 if ((so->so_state & SS_ISDISCONNECTED) == 0 || 817 (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) 818 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, 819 NULL, nam, NULL, NULL); 820 else 821 error = ECONNABORTED; 822 823 return (error); 824 } 825 826 int 827 soconnect(struct socket *so, struct mbuf *nam, struct lwp *l) 828 { 829 int error; 830 831 KASSERT(solocked(so)); 832 833 if (so->so_options & SO_ACCEPTCONN) 834 return (EOPNOTSUPP); 835 /* 836 * If protocol is connection-based, can only connect once. 837 * Otherwise, if connected, try to disconnect first. 838 * This allows user to disconnect by connecting to, e.g., 839 * a null address. 840 */ 841 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 842 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 843 (error = sodisconnect(so)))) 844 error = EISCONN; 845 else 846 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, 847 NULL, nam, NULL, l); 848 return (error); 849 } 850 851 int 852 soconnect2(struct socket *so1, struct socket *so2) 853 { 854 int error; 855 856 KASSERT(solocked2(so1, so2)); 857 858 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, 859 NULL, (struct mbuf *)so2, NULL, NULL); 860 return (error); 861 } 862 863 int 864 sodisconnect(struct socket *so) 865 { 866 int error; 867 868 KASSERT(solocked(so)); 869 870 if ((so->so_state & SS_ISCONNECTED) == 0) { 871 error = ENOTCONN; 872 } else if (so->so_state & SS_ISDISCONNECTING) { 873 error = EALREADY; 874 } else { 875 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, 876 NULL, NULL, NULL, NULL); 877 } 878 sodopendfree(); 879 return (error); 880 } 881 882 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 883 /* 884 * Send on a socket. 885 * If send must go all at once and message is larger than 886 * send buffering, then hard error. 887 * Lock against other senders. 
888 * If must go all at once and not enough room now, then 889 * inform user that this would block and do nothing. 890 * Otherwise, if nonblocking, send as much as possible. 891 * The data to be sent is described by "uio" if nonzero, 892 * otherwise by the mbuf chain "top" (which must be null 893 * if uio is not). Data provided in mbuf chain must be small 894 * enough to send all at once. 895 * 896 * Returns nonzero on error, timeout or signal; callers 897 * must check for short counts if EINTR/ERESTART are returned. 898 * Data and control buffers are freed on return. 899 */ 900 int 901 sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, 902 struct mbuf *control, int flags, struct lwp *l) 903 { 904 struct mbuf **mp, *m; 905 struct proc *p; 906 long space, len, resid, clen, mlen; 907 int error, s, dontroute, atomic; 908 short wakeup_state = 0; 909 910 p = l->l_proc; 911 sodopendfree(); 912 clen = 0; 913 914 /* 915 * solock() provides atomicity of access. splsoftnet() prevents 916 * protocol processing soft interrupts from interrupting us and 917 * blocking (expensive). 918 */ 919 s = splsoftnet(); 920 solock(so); 921 atomic = sosendallatonce(so) || top; 922 if (uio) 923 resid = uio->uio_resid; 924 else 925 resid = top->m_pkthdr.len; 926 /* 927 * In theory resid should be unsigned. 928 * However, space must be signed, as it might be less than 0 929 * if we over-committed, and we must use a signed comparison 930 * of space and resid. On the other hand, a negative resid 931 * causes us to loop sending 0-length segments to the protocol. 932 */ 933 if (resid < 0) { 934 error = EINVAL; 935 goto out; 936 } 937 dontroute = 938 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 939 (so->so_proto->pr_flags & PR_ATOMIC); 940 l->l_ru.ru_msgsnd++; 941 if (control) 942 clen = control->m_len; 943 restart: 944 if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0) 945 goto out; 946 do { 947 if (so->so_state & SS_CANTSENDMORE) { 948 error = EPIPE; 949 goto release; 950 } 951 if (so->so_error) { 952 error = so->so_error; 953 so->so_error = 0; 954 goto release; 955 } 956 if ((so->so_state & SS_ISCONNECTED) == 0) { 957 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 958 if ((so->so_state & SS_ISCONFIRMING) == 0 && 959 !(resid == 0 && clen != 0)) { 960 error = ENOTCONN; 961 goto release; 962 } 963 } else if (addr == 0) { 964 error = EDESTADDRREQ; 965 goto release; 966 } 967 } 968 space = sbspace(&so->so_snd); 969 if (flags & MSG_OOB) 970 space += 1024; 971 if ((atomic && resid > so->so_snd.sb_hiwat) || 972 clen > so->so_snd.sb_hiwat) { 973 error = EMSGSIZE; 974 goto release; 975 } 976 if (space < resid + clen && 977 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 978 if (so->so_nbio) { 979 error = EWOULDBLOCK; 980 goto release; 981 } 982 sbunlock(&so->so_snd); 983 if (wakeup_state & SS_RESTARTSYS) { 984 error = ERESTART; 985 goto out; 986 } 987 error = sbwait(&so->so_snd); 988 if (error) 989 goto out; 990 wakeup_state = so->so_state; 991 goto restart; 992 } 993 wakeup_state = 0; 994 mp = ⊤ 995 space -= clen; 996 do { 997 if (uio == NULL) { 998 /* 999 * Data is prepackaged in "top". 
1000 */ 1001 resid = 0; 1002 if (flags & MSG_EOR) 1003 top->m_flags |= M_EOR; 1004 } else do { 1005 sounlock(so); 1006 splx(s); 1007 if (top == NULL) { 1008 m = m_gethdr(M_WAIT, MT_DATA); 1009 mlen = MHLEN; 1010 m->m_pkthdr.len = 0; 1011 m->m_pkthdr.rcvif = NULL; 1012 } else { 1013 m = m_get(M_WAIT, MT_DATA); 1014 mlen = MLEN; 1015 } 1016 MCLAIM(m, so->so_snd.sb_mowner); 1017 if (sock_loan_thresh >= 0 && 1018 uio->uio_iov->iov_len >= sock_loan_thresh && 1019 space >= sock_loan_thresh && 1020 (len = sosend_loan(so, uio, m, 1021 space)) != 0) { 1022 SOSEND_COUNTER_INCR(&sosend_loan_big); 1023 space -= len; 1024 goto have_data; 1025 } 1026 if (resid >= MINCLSIZE && space >= MCLBYTES) { 1027 SOSEND_COUNTER_INCR(&sosend_copy_big); 1028 m_clget(m, M_DONTWAIT); 1029 if ((m->m_flags & M_EXT) == 0) 1030 goto nopages; 1031 mlen = MCLBYTES; 1032 if (atomic && top == 0) { 1033 len = lmin(MCLBYTES - max_hdr, 1034 resid); 1035 m->m_data += max_hdr; 1036 } else 1037 len = lmin(MCLBYTES, resid); 1038 space -= len; 1039 } else { 1040 nopages: 1041 SOSEND_COUNTER_INCR(&sosend_copy_small); 1042 len = lmin(lmin(mlen, resid), space); 1043 space -= len; 1044 /* 1045 * For datagram protocols, leave room 1046 * for protocol headers in first mbuf. 1047 */ 1048 if (atomic && top == 0 && len < mlen) 1049 MH_ALIGN(m, len); 1050 } 1051 error = uiomove(mtod(m, void *), (int)len, uio); 1052 have_data: 1053 resid = uio->uio_resid; 1054 m->m_len = len; 1055 *mp = m; 1056 top->m_pkthdr.len += len; 1057 s = splsoftnet(); 1058 solock(so); 1059 if (error != 0) 1060 goto release; 1061 mp = &m->m_next; 1062 if (resid <= 0) { 1063 if (flags & MSG_EOR) 1064 top->m_flags |= M_EOR; 1065 break; 1066 } 1067 } while (space > 0 && atomic); 1068 1069 if (so->so_state & SS_CANTSENDMORE) { 1070 error = EPIPE; 1071 goto release; 1072 } 1073 if (dontroute) 1074 so->so_options |= SO_DONTROUTE; 1075 if (resid > 0) 1076 so->so_state |= SS_MORETOCOME; 1077 error = (*so->so_proto->pr_usrreq)(so, 1078 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, 1079 top, addr, control, curlwp); 1080 if (dontroute) 1081 so->so_options &= ~SO_DONTROUTE; 1082 if (resid > 0) 1083 so->so_state &= ~SS_MORETOCOME; 1084 clen = 0; 1085 control = NULL; 1086 top = NULL; 1087 mp = ⊤ 1088 if (error != 0) 1089 goto release; 1090 } while (resid && space > 0); 1091 } while (resid); 1092 1093 release: 1094 sbunlock(&so->so_snd); 1095 out: 1096 sounlock(so); 1097 splx(s); 1098 if (top) 1099 m_freem(top); 1100 if (control) 1101 m_freem(control); 1102 return (error); 1103 } 1104 1105 /* 1106 * Following replacement or removal of the first mbuf on the first 1107 * mbuf chain of a socket buffer, push necessary state changes back 1108 * into the socket buffer so that other consumers see the values 1109 * consistently. 'nextrecord' is the callers locally stored value of 1110 * the original value of sb->sb_mb->m_nextpkt which must be restored 1111 * when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. 1112 */ 1113 static void 1114 sbsync(struct sockbuf *sb, struct mbuf *nextrecord) 1115 { 1116 1117 KASSERT(solocked(sb->sb_so)); 1118 1119 /* 1120 * First, update for the new value of nextrecord. If necessary, 1121 * make it the first record. 1122 */ 1123 if (sb->sb_mb != NULL) 1124 sb->sb_mb->m_nextpkt = nextrecord; 1125 else 1126 sb->sb_mb = nextrecord; 1127 1128 /* 1129 * Now update any dependent socket buffer fields to reflect 1130 * the new state. 
This is an inline of SB_EMPTY_FIXUP, with 1131 * the addition of a second clause that takes care of the 1132 * case where sb_mb has been updated, but remains the last 1133 * record. 1134 */ 1135 if (sb->sb_mb == NULL) { 1136 sb->sb_mbtail = NULL; 1137 sb->sb_lastrecord = NULL; 1138 } else if (sb->sb_mb->m_nextpkt == NULL) 1139 sb->sb_lastrecord = sb->sb_mb; 1140 } 1141 1142 /* 1143 * Implement receive operations on a socket. 1144 * We depend on the way that records are added to the sockbuf 1145 * by sbappend*. In particular, each record (mbufs linked through m_next) 1146 * must begin with an address if the protocol so specifies, 1147 * followed by an optional mbuf or mbufs containing ancillary data, 1148 * and then zero or more mbufs of data. 1149 * In order to avoid blocking network interrupts for the entire time here, 1150 * we splx() while doing the actual copy to user space. 1151 * Although the sockbuf is locked, new data may still be appended, 1152 * and thus we must maintain consistency of the sockbuf during that time. 1153 * 1154 * The caller may receive the data as a single mbuf chain by supplying 1155 * an mbuf **mp0 for use in returning the chain. The uio is then used 1156 * only for the count in uio_resid. 1157 */ 1158 int 1159 soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, 1160 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1161 { 1162 struct lwp *l = curlwp; 1163 struct mbuf *m, **mp, *mt; 1164 int atomic, flags, len, error, s, offset, moff, type, orig_resid; 1165 const struct protosw *pr; 1166 struct mbuf *nextrecord; 1167 int mbuf_removed = 0; 1168 const struct domain *dom; 1169 short wakeup_state = 0; 1170 1171 pr = so->so_proto; 1172 atomic = pr->pr_flags & PR_ATOMIC; 1173 dom = pr->pr_domain; 1174 mp = mp0; 1175 type = 0; 1176 orig_resid = uio->uio_resid; 1177 1178 if (paddr != NULL) 1179 *paddr = NULL; 1180 if (controlp != NULL) 1181 *controlp = NULL; 1182 if (flagsp != NULL) 1183 flags = *flagsp &~ MSG_EOR; 1184 else 1185 flags = 0; 1186 1187 if ((flags & MSG_DONTWAIT) == 0) 1188 sodopendfree(); 1189 1190 if (flags & MSG_OOB) { 1191 m = m_get(M_WAIT, MT_DATA); 1192 solock(so); 1193 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, 1194 (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l); 1195 sounlock(so); 1196 if (error) 1197 goto bad; 1198 do { 1199 error = uiomove(mtod(m, void *), 1200 (int) min(uio->uio_resid, m->m_len), uio); 1201 m = m_free(m); 1202 } while (uio->uio_resid > 0 && error == 0 && m); 1203 bad: 1204 if (m != NULL) 1205 m_freem(m); 1206 return error; 1207 } 1208 if (mp != NULL) 1209 *mp = NULL; 1210 1211 /* 1212 * solock() provides atomicity of access. splsoftnet() prevents 1213 * protocol processing soft interrupts from interrupting us and 1214 * blocking (expensive). 1215 */ 1216 s = splsoftnet(); 1217 solock(so); 1218 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) 1219 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l); 1220 1221 restart: 1222 if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) { 1223 sounlock(so); 1224 splx(s); 1225 return error; 1226 } 1227 1228 m = so->so_rcv.sb_mb; 1229 /* 1230 * If we have less data than requested, block awaiting more 1231 * (subject to any timeout) if: 1232 * 1. the current count is less than the low water mark, 1233 * 2. MSG_WAITALL is set, and it is possible to do the entire 1234 * receive operation at once if we block (resid <= hiwat), or 1235 * 3. MSG_DONTWAIT is not set. 
1236 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1237 * we have to do the receive in sections, and thus risk returning 1238 * a short count if a timeout or signal occurs after we start. 1239 */ 1240 if (m == NULL || 1241 ((flags & MSG_DONTWAIT) == 0 && 1242 so->so_rcv.sb_cc < uio->uio_resid && 1243 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 1244 ((flags & MSG_WAITALL) && 1245 uio->uio_resid <= so->so_rcv.sb_hiwat)) && 1246 m->m_nextpkt == NULL && !atomic)) { 1247 #ifdef DIAGNOSTIC 1248 if (m == NULL && so->so_rcv.sb_cc) 1249 panic("receive 1"); 1250 #endif 1251 if (so->so_error) { 1252 if (m != NULL) 1253 goto dontblock; 1254 error = so->so_error; 1255 if ((flags & MSG_PEEK) == 0) 1256 so->so_error = 0; 1257 goto release; 1258 } 1259 if (so->so_state & SS_CANTRCVMORE) { 1260 if (m != NULL) 1261 goto dontblock; 1262 else 1263 goto release; 1264 } 1265 for (; m != NULL; m = m->m_next) 1266 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1267 m = so->so_rcv.sb_mb; 1268 goto dontblock; 1269 } 1270 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1271 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1272 error = ENOTCONN; 1273 goto release; 1274 } 1275 if (uio->uio_resid == 0) 1276 goto release; 1277 if (so->so_nbio || (flags & MSG_DONTWAIT)) { 1278 error = EWOULDBLOCK; 1279 goto release; 1280 } 1281 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 1282 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 1283 sbunlock(&so->so_rcv); 1284 if (wakeup_state & SS_RESTARTSYS) 1285 error = ERESTART; 1286 else 1287 error = sbwait(&so->so_rcv); 1288 if (error != 0) { 1289 sounlock(so); 1290 splx(s); 1291 return error; 1292 } 1293 wakeup_state = so->so_state; 1294 goto restart; 1295 } 1296 dontblock: 1297 /* 1298 * On entry here, m points to the first record of the socket buffer. 1299 * From this point onward, we maintain 'nextrecord' as a cache of the 1300 * pointer to the next record in the socket buffer. We must keep the 1301 * various socket buffer pointers and local stack versions of the 1302 * pointers in sync, pushing out modifications before dropping the 1303 * socket lock, and re-reading them when picking it up. 1304 * 1305 * Otherwise, we will race with the network stack appending new data 1306 * or records onto the socket buffer by using inconsistent/stale 1307 * versions of the field, possibly resulting in socket buffer 1308 * corruption. 1309 * 1310 * By holding the high-level sblock(), we prevent simultaneous 1311 * readers from pulling off the front of the socket buffer. 1312 */ 1313 if (l != NULL) 1314 l->l_ru.ru_msgrcv++; 1315 KASSERT(m == so->so_rcv.sb_mb); 1316 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 1317 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 1318 nextrecord = m->m_nextpkt; 1319 if (pr->pr_flags & PR_ADDR) { 1320 #ifdef DIAGNOSTIC 1321 if (m->m_type != MT_SONAME) 1322 panic("receive 1a"); 1323 #endif 1324 orig_resid = 0; 1325 if (flags & MSG_PEEK) { 1326 if (paddr) 1327 *paddr = m_copy(m, 0, m->m_len); 1328 m = m->m_next; 1329 } else { 1330 sbfree(&so->so_rcv, m); 1331 mbuf_removed = 1; 1332 if (paddr != NULL) { 1333 *paddr = m; 1334 so->so_rcv.sb_mb = m->m_next; 1335 m->m_next = NULL; 1336 m = so->so_rcv.sb_mb; 1337 } else { 1338 MFREE(m, so->so_rcv.sb_mb); 1339 m = so->so_rcv.sb_mb; 1340 } 1341 sbsync(&so->so_rcv, nextrecord); 1342 } 1343 } 1344 1345 /* 1346 * Process one or more MT_CONTROL mbufs present before any data mbufs 1347 * in the first mbuf chain on the socket buffer. 
If MSG_PEEK, we 1348 * just copy the data; if !MSG_PEEK, we call into the protocol to 1349 * perform externalization (or freeing if controlp == NULL). 1350 */ 1351 if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) { 1352 struct mbuf *cm = NULL, *cmn; 1353 struct mbuf **cme = &cm; 1354 1355 do { 1356 if (flags & MSG_PEEK) { 1357 if (controlp != NULL) { 1358 *controlp = m_copy(m, 0, m->m_len); 1359 controlp = &(*controlp)->m_next; 1360 } 1361 m = m->m_next; 1362 } else { 1363 sbfree(&so->so_rcv, m); 1364 so->so_rcv.sb_mb = m->m_next; 1365 m->m_next = NULL; 1366 *cme = m; 1367 cme = &(*cme)->m_next; 1368 m = so->so_rcv.sb_mb; 1369 } 1370 } while (m != NULL && m->m_type == MT_CONTROL); 1371 if ((flags & MSG_PEEK) == 0) 1372 sbsync(&so->so_rcv, nextrecord); 1373 for (; cm != NULL; cm = cmn) { 1374 cmn = cm->m_next; 1375 cm->m_next = NULL; 1376 type = mtod(cm, struct cmsghdr *)->cmsg_type; 1377 if (controlp != NULL) { 1378 if (dom->dom_externalize != NULL && 1379 type == SCM_RIGHTS) { 1380 sounlock(so); 1381 splx(s); 1382 error = (*dom->dom_externalize)(cm, l); 1383 s = splsoftnet(); 1384 solock(so); 1385 } 1386 *controlp = cm; 1387 while (*controlp != NULL) 1388 controlp = &(*controlp)->m_next; 1389 } else { 1390 /* 1391 * Dispose of any SCM_RIGHTS message that went 1392 * through the read path rather than recv. 1393 */ 1394 if (dom->dom_dispose != NULL && 1395 type == SCM_RIGHTS) { 1396 sounlock(so); 1397 (*dom->dom_dispose)(cm); 1398 solock(so); 1399 } 1400 m_freem(cm); 1401 } 1402 } 1403 if (m != NULL) 1404 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1405 else 1406 nextrecord = so->so_rcv.sb_mb; 1407 orig_resid = 0; 1408 } 1409 1410 /* If m is non-NULL, we have some data to read. */ 1411 if (__predict_true(m != NULL)) { 1412 type = m->m_type; 1413 if (type == MT_OOBDATA) 1414 flags |= MSG_OOB; 1415 } 1416 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 1417 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 1418 1419 moff = 0; 1420 offset = 0; 1421 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1422 if (m->m_type == MT_OOBDATA) { 1423 if (type != MT_OOBDATA) 1424 break; 1425 } else if (type == MT_OOBDATA) 1426 break; 1427 #ifdef DIAGNOSTIC 1428 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) 1429 panic("receive 3"); 1430 #endif 1431 so->so_state &= ~SS_RCVATMARK; 1432 wakeup_state = 0; 1433 len = uio->uio_resid; 1434 if (so->so_oobmark && len > so->so_oobmark - offset) 1435 len = so->so_oobmark - offset; 1436 if (len > m->m_len - moff) 1437 len = m->m_len - moff; 1438 /* 1439 * If mp is set, just pass back the mbufs. 1440 * Otherwise copy them out via the uio, then free. 1441 * Sockbuf must be consistent here (points to current mbuf, 1442 * it points to next record) when we drop priority; 1443 * we must note any additions to the sockbuf when we 1444 * block interrupts again. 1445 */ 1446 if (mp == NULL) { 1447 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 1448 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 1449 sounlock(so); 1450 splx(s); 1451 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1452 s = splsoftnet(); 1453 solock(so); 1454 if (error != 0) { 1455 /* 1456 * If any part of the record has been removed 1457 * (such as the MT_SONAME mbuf, which will 1458 * happen when PR_ADDR, and thus also 1459 * PR_ATOMIC, is set), then drop the entire 1460 * record to maintain the atomicity of the 1461 * receive operation. 1462 * 1463 * This avoids a later panic("receive 1a") 1464 * when compiled with DIAGNOSTIC. 
1465 */ 1466 if (m && mbuf_removed && atomic) 1467 (void) sbdroprecord(&so->so_rcv); 1468 1469 goto release; 1470 } 1471 } else 1472 uio->uio_resid -= len; 1473 if (len == m->m_len - moff) { 1474 if (m->m_flags & M_EOR) 1475 flags |= MSG_EOR; 1476 if (flags & MSG_PEEK) { 1477 m = m->m_next; 1478 moff = 0; 1479 } else { 1480 nextrecord = m->m_nextpkt; 1481 sbfree(&so->so_rcv, m); 1482 if (mp) { 1483 *mp = m; 1484 mp = &m->m_next; 1485 so->so_rcv.sb_mb = m = m->m_next; 1486 *mp = NULL; 1487 } else { 1488 MFREE(m, so->so_rcv.sb_mb); 1489 m = so->so_rcv.sb_mb; 1490 } 1491 /* 1492 * If m != NULL, we also know that 1493 * so->so_rcv.sb_mb != NULL. 1494 */ 1495 KASSERT(so->so_rcv.sb_mb == m); 1496 if (m) { 1497 m->m_nextpkt = nextrecord; 1498 if (nextrecord == NULL) 1499 so->so_rcv.sb_lastrecord = m; 1500 } else { 1501 so->so_rcv.sb_mb = nextrecord; 1502 SB_EMPTY_FIXUP(&so->so_rcv); 1503 } 1504 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); 1505 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); 1506 } 1507 } else if (flags & MSG_PEEK) 1508 moff += len; 1509 else { 1510 if (mp != NULL) { 1511 mt = m_copym(m, 0, len, M_NOWAIT); 1512 if (__predict_false(mt == NULL)) { 1513 sounlock(so); 1514 mt = m_copym(m, 0, len, M_WAIT); 1515 solock(so); 1516 } 1517 *mp = mt; 1518 } 1519 m->m_data += len; 1520 m->m_len -= len; 1521 so->so_rcv.sb_cc -= len; 1522 } 1523 if (so->so_oobmark) { 1524 if ((flags & MSG_PEEK) == 0) { 1525 so->so_oobmark -= len; 1526 if (so->so_oobmark == 0) { 1527 so->so_state |= SS_RCVATMARK; 1528 break; 1529 } 1530 } else { 1531 offset += len; 1532 if (offset == so->so_oobmark) 1533 break; 1534 } 1535 } 1536 if (flags & MSG_EOR) 1537 break; 1538 /* 1539 * If the MSG_WAITALL flag is set (for non-atomic socket), 1540 * we must not quit until "uio->uio_resid == 0" or an error 1541 * termination. If a signal/timeout occurs, return 1542 * with a short count but without error. 1543 * Keep sockbuf locked against other readers. 1544 */ 1545 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1546 !sosendallatonce(so) && !nextrecord) { 1547 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1548 break; 1549 /* 1550 * If we are peeking and the socket receive buffer is 1551 * full, stop since we can't get more data to peek at. 1552 */ 1553 if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0) 1554 break; 1555 /* 1556 * If we've drained the socket buffer, tell the 1557 * protocol in case it needs to do something to 1558 * get it filled again. 1559 */ 1560 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1561 (*pr->pr_usrreq)(so, PRU_RCVD, 1562 NULL, (struct mbuf *)(long)flags, NULL, l); 1563 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); 1564 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); 1565 if (wakeup_state & SS_RESTARTSYS) 1566 error = ERESTART; 1567 else 1568 error = sbwait(&so->so_rcv); 1569 if (error != 0) { 1570 sbunlock(&so->so_rcv); 1571 sounlock(so); 1572 splx(s); 1573 return 0; 1574 } 1575 if ((m = so->so_rcv.sb_mb) != NULL) 1576 nextrecord = m->m_nextpkt; 1577 wakeup_state = so->so_state; 1578 } 1579 } 1580 1581 if (m && atomic) { 1582 flags |= MSG_TRUNC; 1583 if ((flags & MSG_PEEK) == 0) 1584 (void) sbdroprecord(&so->so_rcv); 1585 } 1586 if ((flags & MSG_PEEK) == 0) { 1587 if (m == NULL) { 1588 /* 1589 * First part is an inline SB_EMPTY_FIXUP(). Second 1590 * part makes sure sb_lastrecord is up-to-date if 1591 * there is still data in the socket buffer. 
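 *
 * For reference, SB_EMPTY_FIXUP(sb) (sys/socketvar.h) is essentially:
 *
 *	if ((sb)->sb_mb == NULL) {
 *		(sb)->sb_mbtail = NULL;
 *		(sb)->sb_lastrecord = NULL;
 *	}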
1592 */ 1593 so->so_rcv.sb_mb = nextrecord; 1594 if (so->so_rcv.sb_mb == NULL) { 1595 so->so_rcv.sb_mbtail = NULL; 1596 so->so_rcv.sb_lastrecord = NULL; 1597 } else if (nextrecord->m_nextpkt == NULL) 1598 so->so_rcv.sb_lastrecord = nextrecord; 1599 } 1600 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); 1601 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); 1602 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1603 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, 1604 (struct mbuf *)(long)flags, NULL, l); 1605 } 1606 if (orig_resid == uio->uio_resid && orig_resid && 1607 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1608 sbunlock(&so->so_rcv); 1609 goto restart; 1610 } 1611 1612 if (flagsp != NULL) 1613 *flagsp |= flags; 1614 release: 1615 sbunlock(&so->so_rcv); 1616 sounlock(so); 1617 splx(s); 1618 return error; 1619 } 1620 1621 int 1622 soshutdown(struct socket *so, int how) 1623 { 1624 const struct protosw *pr; 1625 int error; 1626 1627 KASSERT(solocked(so)); 1628 1629 pr = so->so_proto; 1630 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1631 return (EINVAL); 1632 1633 if (how == SHUT_RD || how == SHUT_RDWR) { 1634 sorflush(so); 1635 error = 0; 1636 } 1637 if (how == SHUT_WR || how == SHUT_RDWR) 1638 error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, 1639 NULL, NULL, NULL); 1640 1641 return error; 1642 } 1643 1644 void 1645 sorestart(struct socket *so) 1646 { 1647 /* 1648 * An application has called close() on an fd on which another 1649 * of its threads has called a socket system call. 1650 * Mark this and wake everyone up, and code that would block again 1651 * instead returns ERESTART. 1652 * On system call re-entry the fd is validated and EBADF returned. 1653 * Any other fd will block again on the 2nd syscall. 1654 */ 1655 solock(so); 1656 so->so_state |= SS_RESTARTSYS; 1657 cv_broadcast(&so->so_cv); 1658 cv_broadcast(&so->so_snd.sb_cv); 1659 cv_broadcast(&so->so_rcv.sb_cv); 1660 sounlock(so); 1661 } 1662 1663 void 1664 sorflush(struct socket *so) 1665 { 1666 struct sockbuf *sb, asb; 1667 const struct protosw *pr; 1668 1669 KASSERT(solocked(so)); 1670 1671 sb = &so->so_rcv; 1672 pr = so->so_proto; 1673 socantrcvmore(so); 1674 sb->sb_flags |= SB_NOINTR; 1675 (void )sblock(sb, M_WAITOK); 1676 sbunlock(sb); 1677 asb = *sb; 1678 /* 1679 * Clear most of the sockbuf structure, but leave some of the 1680 * fields valid. 
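 * The memset below zeroes everything from sb_startzero to the end of
 * the structure; fields declared before it (for example sb_sel, sb_so
 * and sb_cv, per sys/socketvar.h) keep their values.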
1681 */ 1682 memset(&sb->sb_startzero, 0, 1683 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 1684 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) { 1685 sounlock(so); 1686 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 1687 solock(so); 1688 } 1689 sbrelease(&asb, so); 1690 } 1691 1692 /* 1693 * internal set SOL_SOCKET options 1694 */ 1695 static int 1696 sosetopt1(struct socket *so, const struct sockopt *sopt) 1697 { 1698 int error = EINVAL, optval, opt; 1699 struct linger l; 1700 struct timeval tv; 1701 1702 switch ((opt = sopt->sopt_name)) { 1703 1704 case SO_ACCEPTFILTER: 1705 error = accept_filt_setopt(so, sopt); 1706 KASSERT(solocked(so)); 1707 break; 1708 1709 case SO_LINGER: 1710 error = sockopt_get(sopt, &l, sizeof(l)); 1711 solock(so); 1712 if (error) 1713 break; 1714 if (l.l_linger < 0 || l.l_linger > USHRT_MAX || 1715 l.l_linger > (INT_MAX / hz)) { 1716 error = EDOM; 1717 break; 1718 } 1719 so->so_linger = l.l_linger; 1720 if (l.l_onoff) 1721 so->so_options |= SO_LINGER; 1722 else 1723 so->so_options &= ~SO_LINGER; 1724 break; 1725 1726 case SO_DEBUG: 1727 case SO_KEEPALIVE: 1728 case SO_DONTROUTE: 1729 case SO_USELOOPBACK: 1730 case SO_BROADCAST: 1731 case SO_REUSEADDR: 1732 case SO_REUSEPORT: 1733 case SO_OOBINLINE: 1734 case SO_TIMESTAMP: 1735 #ifdef SO_OTIMESTAMP 1736 case SO_OTIMESTAMP: 1737 #endif 1738 error = sockopt_getint(sopt, &optval); 1739 solock(so); 1740 if (error) 1741 break; 1742 if (optval) 1743 so->so_options |= opt; 1744 else 1745 so->so_options &= ~opt; 1746 break; 1747 1748 case SO_SNDBUF: 1749 case SO_RCVBUF: 1750 case SO_SNDLOWAT: 1751 case SO_RCVLOWAT: 1752 error = sockopt_getint(sopt, &optval); 1753 solock(so); 1754 if (error) 1755 break; 1756 1757 /* 1758 * Values < 1 make no sense for any of these 1759 * options, so disallow them. 1760 */ 1761 if (optval < 1) { 1762 error = EINVAL; 1763 break; 1764 } 1765 1766 switch (opt) { 1767 case SO_SNDBUF: 1768 if (sbreserve(&so->so_snd, (u_long)optval, so) == 0) { 1769 error = ENOBUFS; 1770 break; 1771 } 1772 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 1773 break; 1774 1775 case SO_RCVBUF: 1776 if (sbreserve(&so->so_rcv, (u_long)optval, so) == 0) { 1777 error = ENOBUFS; 1778 break; 1779 } 1780 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1781 break; 1782 1783 /* 1784 * Make sure the low-water is never greater than 1785 * the high-water. 1786 */ 1787 case SO_SNDLOWAT: 1788 if (optval > so->so_snd.sb_hiwat) 1789 optval = so->so_snd.sb_hiwat; 1790 1791 so->so_snd.sb_lowat = optval; 1792 break; 1793 1794 case SO_RCVLOWAT: 1795 if (optval > so->so_rcv.sb_hiwat) 1796 optval = so->so_rcv.sb_hiwat; 1797 1798 so->so_rcv.sb_lowat = optval; 1799 break; 1800 } 1801 break; 1802 1803 #ifdef COMPAT_50 1804 case SO_OSNDTIMEO: 1805 case SO_ORCVTIMEO: { 1806 struct timeval50 otv; 1807 error = sockopt_get(sopt, &otv, sizeof(otv)); 1808 if (error) { 1809 solock(so); 1810 break; 1811 } 1812 timeval50_to_timeval(&otv, &tv); 1813 opt = opt == SO_OSNDTIMEO ? 
SO_SNDTIMEO : SO_RCVTIMEO; 1814 error = 0; 1815 /*FALLTHROUGH*/ 1816 } 1817 #endif /* COMPAT_50 */ 1818 1819 case SO_SNDTIMEO: 1820 case SO_RCVTIMEO: 1821 if (error) 1822 error = sockopt_get(sopt, &tv, sizeof(tv)); 1823 solock(so); 1824 if (error) 1825 break; 1826 1827 if (tv.tv_sec > (INT_MAX - tv.tv_usec / tick) / hz) { 1828 error = EDOM; 1829 break; 1830 } 1831 1832 optval = tv.tv_sec * hz + tv.tv_usec / tick; 1833 if (optval == 0 && tv.tv_usec != 0) 1834 optval = 1; 1835 1836 switch (opt) { 1837 case SO_SNDTIMEO: 1838 so->so_snd.sb_timeo = optval; 1839 break; 1840 case SO_RCVTIMEO: 1841 so->so_rcv.sb_timeo = optval; 1842 break; 1843 } 1844 break; 1845 1846 default: 1847 solock(so); 1848 error = ENOPROTOOPT; 1849 break; 1850 } 1851 KASSERT(solocked(so)); 1852 return error; 1853 } 1854 1855 int 1856 sosetopt(struct socket *so, struct sockopt *sopt) 1857 { 1858 int error, prerr; 1859 1860 if (sopt->sopt_level == SOL_SOCKET) { 1861 error = sosetopt1(so, sopt); 1862 KASSERT(solocked(so)); 1863 } else { 1864 error = ENOPROTOOPT; 1865 solock(so); 1866 } 1867 1868 if ((error == 0 || error == ENOPROTOOPT) && 1869 so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) { 1870 /* give the protocol stack a shot */ 1871 prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, sopt); 1872 if (prerr == 0) 1873 error = 0; 1874 else if (prerr != ENOPROTOOPT) 1875 error = prerr; 1876 } 1877 sounlock(so); 1878 return error; 1879 } 1880 1881 /* 1882 * so_setsockopt() is a wrapper providing a sockopt structure for sosetopt() 1883 */ 1884 int 1885 so_setsockopt(struct lwp *l, struct socket *so, int level, int name, 1886 const void *val, size_t valsize) 1887 { 1888 struct sockopt sopt; 1889 int error; 1890 1891 KASSERT(valsize == 0 || val != NULL); 1892 1893 sockopt_init(&sopt, level, name, valsize); 1894 sockopt_set(&sopt, val, valsize); 1895 1896 error = sosetopt(so, &sopt); 1897 1898 sockopt_destroy(&sopt); 1899 1900 return error; 1901 } 1902 1903 /* 1904 * internal get SOL_SOCKET options 1905 */ 1906 static int 1907 sogetopt1(struct socket *so, struct sockopt *sopt) 1908 { 1909 int error, optval, opt; 1910 struct linger l; 1911 struct timeval tv; 1912 1913 switch ((opt = sopt->sopt_name)) { 1914 1915 case SO_ACCEPTFILTER: 1916 error = accept_filt_getopt(so, sopt); 1917 break; 1918 1919 case SO_LINGER: 1920 l.l_onoff = (so->so_options & SO_LINGER) ? 1 : 0; 1921 l.l_linger = so->so_linger; 1922 1923 error = sockopt_set(sopt, &l, sizeof(l)); 1924 break; 1925 1926 case SO_USELOOPBACK: 1927 case SO_DONTROUTE: 1928 case SO_DEBUG: 1929 case SO_KEEPALIVE: 1930 case SO_REUSEADDR: 1931 case SO_REUSEPORT: 1932 case SO_BROADCAST: 1933 case SO_OOBINLINE: 1934 case SO_TIMESTAMP: 1935 #ifdef SO_OTIMESTAMP 1936 case SO_OTIMESTAMP: 1937 #endif 1938 error = sockopt_setint(sopt, (so->so_options & opt) ? 
1 : 0); 1939 break; 1940 1941 case SO_TYPE: 1942 error = sockopt_setint(sopt, so->so_type); 1943 break; 1944 1945 case SO_ERROR: 1946 error = sockopt_setint(sopt, so->so_error); 1947 so->so_error = 0; 1948 break; 1949 1950 case SO_SNDBUF: 1951 error = sockopt_setint(sopt, so->so_snd.sb_hiwat); 1952 break; 1953 1954 case SO_RCVBUF: 1955 error = sockopt_setint(sopt, so->so_rcv.sb_hiwat); 1956 break; 1957 1958 case SO_SNDLOWAT: 1959 error = sockopt_setint(sopt, so->so_snd.sb_lowat); 1960 break; 1961 1962 case SO_RCVLOWAT: 1963 error = sockopt_setint(sopt, so->so_rcv.sb_lowat); 1964 break; 1965 1966 #ifdef COMPAT_50 1967 case SO_OSNDTIMEO: 1968 case SO_ORCVTIMEO: { 1969 struct timeval50 otv; 1970 1971 optval = (opt == SO_OSNDTIMEO ? 1972 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 1973 1974 otv.tv_sec = optval / hz; 1975 otv.tv_usec = (optval % hz) * tick; 1976 1977 error = sockopt_set(sopt, &otv, sizeof(otv)); 1978 break; 1979 } 1980 #endif /* COMPAT_50 */ 1981 1982 case SO_SNDTIMEO: 1983 case SO_RCVTIMEO: 1984 optval = (opt == SO_SNDTIMEO ? 1985 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 1986 1987 tv.tv_sec = optval / hz; 1988 tv.tv_usec = (optval % hz) * tick; 1989 1990 error = sockopt_set(sopt, &tv, sizeof(tv)); 1991 break; 1992 1993 case SO_OVERFLOWED: 1994 error = sockopt_setint(sopt, so->so_rcv.sb_overflowed); 1995 break; 1996 1997 default: 1998 error = ENOPROTOOPT; 1999 break; 2000 } 2001 2002 return (error); 2003 } 2004 2005 int 2006 sogetopt(struct socket *so, struct sockopt *sopt) 2007 { 2008 int error; 2009 2010 solock(so); 2011 if (sopt->sopt_level != SOL_SOCKET) { 2012 if (so->so_proto && so->so_proto->pr_ctloutput) { 2013 error = ((*so->so_proto->pr_ctloutput) 2014 (PRCO_GETOPT, so, sopt)); 2015 } else 2016 error = (ENOPROTOOPT); 2017 } else { 2018 error = sogetopt1(so, sopt); 2019 } 2020 sounlock(so); 2021 return (error); 2022 } 2023 2024 /* 2025 * alloc sockopt data buffer buffer 2026 * - will be released at destroy 2027 */ 2028 static int 2029 sockopt_alloc(struct sockopt *sopt, size_t len, km_flag_t kmflag) 2030 { 2031 2032 KASSERT(sopt->sopt_size == 0); 2033 2034 if (len > sizeof(sopt->sopt_buf)) { 2035 sopt->sopt_data = kmem_zalloc(len, kmflag); 2036 if (sopt->sopt_data == NULL) 2037 return ENOMEM; 2038 } else 2039 sopt->sopt_data = sopt->sopt_buf; 2040 2041 sopt->sopt_size = len; 2042 return 0; 2043 } 2044 2045 /* 2046 * initialise sockopt storage 2047 * - MAY sleep during allocation 2048 */ 2049 void 2050 sockopt_init(struct sockopt *sopt, int level, int name, size_t size) 2051 { 2052 2053 memset(sopt, 0, sizeof(*sopt)); 2054 2055 sopt->sopt_level = level; 2056 sopt->sopt_name = name; 2057 (void)sockopt_alloc(sopt, size, KM_SLEEP); 2058 } 2059 2060 /* 2061 * destroy sockopt storage 2062 * - will release any held memory references 2063 */ 2064 void 2065 sockopt_destroy(struct sockopt *sopt) 2066 { 2067 2068 if (sopt->sopt_data != sopt->sopt_buf) 2069 kmem_free(sopt->sopt_data, sopt->sopt_size); 2070 2071 memset(sopt, 0, sizeof(*sopt)); 2072 } 2073 2074 /* 2075 * set sockopt value 2076 * - value is copied into sockopt 2077 * - memory is allocated when necessary, will not sleep 2078 */ 2079 int 2080 sockopt_set(struct sockopt *sopt, const void *buf, size_t len) 2081 { 2082 int error; 2083 2084 if (sopt->sopt_size == 0) { 2085 error = sockopt_alloc(sopt, len, KM_NOSLEEP); 2086 if (error) 2087 return error; 2088 } 2089 2090 KASSERT(sopt->sopt_size == len); 2091 memcpy(sopt->sopt_data, buf, len); 2092 return 0; 2093 } 2094 2095 /* 2096 * common case of set sockopt integer 
value 2097 */ 2098 int 2099 sockopt_setint(struct sockopt *sopt, int val) 2100 { 2101 2102 return sockopt_set(sopt, &val, sizeof(int)); 2103 } 2104 2105 /* 2106 * get sockopt value 2107 * - correct size must be given 2108 */ 2109 int 2110 sockopt_get(const struct sockopt *sopt, void *buf, size_t len) 2111 { 2112 2113 if (sopt->sopt_size != len) 2114 return EINVAL; 2115 2116 memcpy(buf, sopt->sopt_data, len); 2117 return 0; 2118 } 2119 2120 /* 2121 * common case of get sockopt integer value 2122 */ 2123 int 2124 sockopt_getint(const struct sockopt *sopt, int *valp) 2125 { 2126 2127 return sockopt_get(sopt, valp, sizeof(int)); 2128 } 2129 2130 /* 2131 * set sockopt value from mbuf 2132 * - ONLY for legacy code 2133 * - mbuf is released by sockopt 2134 * - will not sleep 2135 */ 2136 int 2137 sockopt_setmbuf(struct sockopt *sopt, struct mbuf *m) 2138 { 2139 size_t len; 2140 int error; 2141 2142 len = m_length(m); 2143 2144 if (sopt->sopt_size == 0) { 2145 error = sockopt_alloc(sopt, len, KM_NOSLEEP); 2146 if (error) 2147 return error; 2148 } 2149 2150 KASSERT(sopt->sopt_size == len); 2151 m_copydata(m, 0, len, sopt->sopt_data); 2152 m_freem(m); 2153 2154 return 0; 2155 } 2156 2157 /* 2158 * get sockopt value into mbuf 2159 * - ONLY for legacy code 2160 * - mbuf to be released by the caller 2161 * - will not sleep 2162 */ 2163 struct mbuf * 2164 sockopt_getmbuf(const struct sockopt *sopt) 2165 { 2166 struct mbuf *m; 2167 2168 if (sopt->sopt_size > MCLBYTES) 2169 return NULL; 2170 2171 m = m_get(M_DONTWAIT, MT_SOOPTS); 2172 if (m == NULL) 2173 return NULL; 2174 2175 if (sopt->sopt_size > MLEN) { 2176 MCLGET(m, M_DONTWAIT); 2177 if ((m->m_flags & M_EXT) == 0) { 2178 m_free(m); 2179 return NULL; 2180 } 2181 } 2182 2183 memcpy(mtod(m, void *), sopt->sopt_data, sopt->sopt_size); 2184 m->m_len = sopt->sopt_size; 2185 2186 return m; 2187 } 2188 2189 void 2190 sohasoutofband(struct socket *so) 2191 { 2192 2193 fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so); 2194 selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, NOTE_SUBMIT); 2195 } 2196 2197 static void 2198 filt_sordetach(struct knote *kn) 2199 { 2200 struct socket *so; 2201 2202 so = ((file_t *)kn->kn_obj)->f_data; 2203 solock(so); 2204 SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); 2205 if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) 2206 so->so_rcv.sb_flags &= ~SB_KNOTE; 2207 sounlock(so); 2208 } 2209 2210 /*ARGSUSED*/ 2211 static int 2212 filt_soread(struct knote *kn, long hint) 2213 { 2214 struct socket *so; 2215 int rv; 2216 2217 so = ((file_t *)kn->kn_obj)->f_data; 2218 if (hint != NOTE_SUBMIT) 2219 solock(so); 2220 kn->kn_data = so->so_rcv.sb_cc; 2221 if (so->so_state & SS_CANTRCVMORE) { 2222 kn->kn_flags |= EV_EOF; 2223 kn->kn_fflags = so->so_error; 2224 rv = 1; 2225 } else if (so->so_error) /* temporary udp error */ 2226 rv = 1; 2227 else if (kn->kn_sfflags & NOTE_LOWAT) 2228 rv = (kn->kn_data >= kn->kn_sdata); 2229 else 2230 rv = (kn->kn_data >= so->so_rcv.sb_lowat); 2231 if (hint != NOTE_SUBMIT) 2232 sounlock(so); 2233 return rv; 2234 } 2235 2236 static void 2237 filt_sowdetach(struct knote *kn) 2238 { 2239 struct socket *so; 2240 2241 so = ((file_t *)kn->kn_obj)->f_data; 2242 solock(so); 2243 SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); 2244 if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) 2245 so->so_snd.sb_flags &= ~SB_KNOTE; 2246 sounlock(so); 2247 } 2248 2249 /*ARGSUSED*/ 2250 static int 2251 filt_sowrite(struct knote *kn, long hint) 2252 { 2253 struct socket *so; 2254 int 
rv; 2255 2256 so = ((file_t *)kn->kn_obj)->f_data; 2257 if (hint != NOTE_SUBMIT) 2258 solock(so); 2259 kn->kn_data = sbspace(&so->so_snd); 2260 if (so->so_state & SS_CANTSENDMORE) { 2261 kn->kn_flags |= EV_EOF; 2262 kn->kn_fflags = so->so_error; 2263 rv = 1; 2264 } else if (so->so_error) /* temporary udp error */ 2265 rv = 1; 2266 else if (((so->so_state & SS_ISCONNECTED) == 0) && 2267 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2268 rv = 0; 2269 else if (kn->kn_sfflags & NOTE_LOWAT) 2270 rv = (kn->kn_data >= kn->kn_sdata); 2271 else 2272 rv = (kn->kn_data >= so->so_snd.sb_lowat); 2273 if (hint != NOTE_SUBMIT) 2274 sounlock(so); 2275 return rv; 2276 } 2277 2278 /*ARGSUSED*/ 2279 static int 2280 filt_solisten(struct knote *kn, long hint) 2281 { 2282 struct socket *so; 2283 int rv; 2284 2285 so = ((file_t *)kn->kn_obj)->f_data; 2286 2287 /* 2288 * Set kn_data to number of incoming connections, not 2289 * counting partial (incomplete) connections. 2290 */ 2291 if (hint != NOTE_SUBMIT) 2292 solock(so); 2293 kn->kn_data = so->so_qlen; 2294 rv = (kn->kn_data > 0); 2295 if (hint != NOTE_SUBMIT) 2296 sounlock(so); 2297 return rv; 2298 } 2299 2300 static const struct filterops solisten_filtops = 2301 { 1, NULL, filt_sordetach, filt_solisten }; 2302 static const struct filterops soread_filtops = 2303 { 1, NULL, filt_sordetach, filt_soread }; 2304 static const struct filterops sowrite_filtops = 2305 { 1, NULL, filt_sowdetach, filt_sowrite }; 2306 2307 int 2308 soo_kqfilter(struct file *fp, struct knote *kn) 2309 { 2310 struct socket *so; 2311 struct sockbuf *sb; 2312 2313 so = ((file_t *)kn->kn_obj)->f_data; 2314 solock(so); 2315 switch (kn->kn_filter) { 2316 case EVFILT_READ: 2317 if (so->so_options & SO_ACCEPTCONN) 2318 kn->kn_fop = &solisten_filtops; 2319 else 2320 kn->kn_fop = &soread_filtops; 2321 sb = &so->so_rcv; 2322 break; 2323 case EVFILT_WRITE: 2324 kn->kn_fop = &sowrite_filtops; 2325 sb = &so->so_snd; 2326 break; 2327 default: 2328 sounlock(so); 2329 return (EINVAL); 2330 } 2331 SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); 2332 sb->sb_flags |= SB_KNOTE; 2333 sounlock(so); 2334 return (0); 2335 } 2336 2337 static int 2338 sodopoll(struct socket *so, int events) 2339 { 2340 int revents; 2341 2342 revents = 0; 2343 2344 if (events & (POLLIN | POLLRDNORM)) 2345 if (soreadable(so)) 2346 revents |= events & (POLLIN | POLLRDNORM); 2347 2348 if (events & (POLLOUT | POLLWRNORM)) 2349 if (sowritable(so)) 2350 revents |= events & (POLLOUT | POLLWRNORM); 2351 2352 if (events & (POLLPRI | POLLRDBAND)) 2353 if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) 2354 revents |= events & (POLLPRI | POLLRDBAND); 2355 2356 return revents; 2357 } 2358 2359 int 2360 sopoll(struct socket *so, int events) 2361 { 2362 int revents = 0; 2363 2364 #ifndef DIAGNOSTIC 2365 /* 2366 * Do a quick, unlocked check in expectation that the socket 2367 * will be ready for I/O. Don't do this check if DIAGNOSTIC, 2368 * as the solocked() assertions will fail. 
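 * If the unlocked check finds nothing ready, fall through to the
 * locked path below, which re-checks and records the poller with
 * selrecord().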
2369 */ 2370 if ((revents = sodopoll(so, events)) != 0) 2371 return revents; 2372 #endif 2373 2374 solock(so); 2375 if ((revents = sodopoll(so, events)) == 0) { 2376 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { 2377 selrecord(curlwp, &so->so_rcv.sb_sel); 2378 so->so_rcv.sb_flags |= SB_NOTIFY; 2379 } 2380 2381 if (events & (POLLOUT | POLLWRNORM)) { 2382 selrecord(curlwp, &so->so_snd.sb_sel); 2383 so->so_snd.sb_flags |= SB_NOTIFY; 2384 } 2385 } 2386 sounlock(so); 2387 2388 return revents; 2389 } 2390 2391 2392 #include <sys/sysctl.h> 2393 2394 static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); 2395 2396 /* 2397 * sysctl helper routine for kern.somaxkva. ensures that the given 2398 * value is not too small. 2399 * (XXX should we maybe make sure it's not too large as well?) 2400 */ 2401 static int 2402 sysctl_kern_somaxkva(SYSCTLFN_ARGS) 2403 { 2404 int error, new_somaxkva; 2405 struct sysctlnode node; 2406 2407 new_somaxkva = somaxkva; 2408 node = *rnode; 2409 node.sysctl_data = &new_somaxkva; 2410 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2411 if (error || newp == NULL) 2412 return (error); 2413 2414 if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */ 2415 return (EINVAL); 2416 2417 mutex_enter(&so_pendfree_lock); 2418 somaxkva = new_somaxkva; 2419 cv_broadcast(&socurkva_cv); 2420 mutex_exit(&so_pendfree_lock); 2421 2422 return (error); 2423 } 2424 2425 static void 2426 sysctl_kern_somaxkva_setup(void) 2427 { 2428 2429 KASSERT(socket_sysctllog == NULL); 2430 sysctl_createv(&socket_sysctllog, 0, NULL, NULL, 2431 CTLFLAG_PERMANENT, 2432 CTLTYPE_NODE, "kern", NULL, 2433 NULL, 0, NULL, 0, 2434 CTL_KERN, CTL_EOL); 2435 2436 sysctl_createv(&socket_sysctllog, 0, NULL, NULL, 2437 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2438 CTLTYPE_INT, "somaxkva", 2439 SYSCTL_DESCR("Maximum amount of kernel memory to be " 2440 "used for socket buffers"), 2441 sysctl_kern_somaxkva, 0, NULL, 0, 2442 CTL_KERN, KERN_SOMAXKVA, CTL_EOL); 2443 } 2444
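/*
 * Illustrative usage sketch (not part of the original file): setting a
 * boolean SOL_SOCKET option from kernel code with the sockopt helpers
 * defined above; so_setsockopt() is the existing convenience wrapper
 * for the same sequence.
 *
 *	struct sockopt sopt;
 *	int error;
 *
 *	sockopt_init(&sopt, SOL_SOCKET, SO_REUSEADDR, sizeof(int));
 *	sockopt_setint(&sopt, 1);
 *	error = sosetopt(so, &sopt);
 *	sockopt_destroy(&sopt);
 */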