/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.26 2004/12/08 23:59:01 hsu Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>

#include <machine/limits.h>

#ifdef INET
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void filt_sordetach(struct knote *kn);
static int filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

struct vm_zone *socket_zone;
so_gen_t so_gencnt;		/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
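/*
 * A brief orientation for the code below (descriptive only, derived from
 * the routines in this file): socreate() obtains zeroed storage from
 * socket_zone via soalloc() and then lets the protocol attach its control
 * block through so_pru_attach(); teardown runs soclose() -> sofree() ->
 * sodealloc(), which releases the buffer reservations and returns the
 * storage to the zone.
 */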
struct socket *
soalloc(int waitok)
{
	struct socket *so;

	so = zalloc(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.sb_sel.si_mlist);
		TAILQ_INIT(&so->so_snd.sb_sel.si_mlist);
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	so->so_proto = prp;
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int s = splnet();
	int error;

	error = so_pru_bind(so, nam, td);
	splx(s);
	return (error);
}

void
sodealloc(struct socket *so)
{
	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	zfree(socket_zone, so);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int s, error;

	s = splnet();
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) {
		splx(s);
		return (EINVAL);
	}

	error = so_pru_listen(so, td);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}

void
sofree(struct socket *so)
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(struct socket *so)
{
	int error;

	error = so_pru_abort(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = so_pru_accept(so, nam);
	splx(s);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
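	/*
	 * Hypothetical userland sketch (not part of this file): a second
	 *
	 *	connect(s, (struct sockaddr *)&new_sin, sizeof(new_sin));
	 *
	 * on an already-connected datagram socket reaches this point with
	 * SS_ISCONNECTED set and PR_CONNREQUIRED clear, so sodisconnect()
	 * drops the old association before so_pru_connect() installs the
	 * new one.
	 */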
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = so_pru_connect(so, nam, td);
	splx(s);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s = splnet();
	int error;

	error = so_pru_connect2(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_proc && td->td_proc->p_stats)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == 0)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			gotoerr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				gotoerr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, MB_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, MB_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE) {
					MCLGET(m, MB_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					len = min(min(mlen, resid), space);
				} else {
nopages:
					len = min(min(mlen, resid), space);
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid <= 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag, and there is nothing
				 * left to send, then use PRU_SEND_EOF instead
				 * of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			s = splnet();				/* XXX */
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int resid, error, s;
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */

	if (td->td_proc && td->td_proc->p_stats)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : top->m_pkthdr.len;

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	s = splnet();
	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		splx(s);
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.sb_hiwat)
		gotoerr(EMSGSIZE);
	if (uio && sbspace(&so->so_snd) < resid) {
		if (so->so_state & SS_NBIO)
			gotoerr(EWOULDBLOCK);
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
		splx(s);
		if (error)
			goto out;
		goto restart;
	}
	splx(s);

	if (uio) {
		top = m_uiomove(uio, MB_WAIT, 0);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
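/*
 * A rough sketch of one record in the receive sockbuf as consumed below
 * (illustrative only; the exact shape depends on the protocol's PR_ADDR
 * and PR_ATOMIC flags):
 *
 *	sb_mb -> [MT_SONAME] -m_next-> [MT_CONTROL] -m_next-> [MT_DATA] ...
 *	             |
 *	             +---- m_nextpkt ----> next record
 *
 * Stream protocols typically present a single record consisting only of
 * MT_DATA mbufs.
 */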
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		so_pru_rcvd(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat),
	 * and in either case only if:
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp =
				    m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					so->so_rcv.sb_mb = m = m_free(m);
				}
				if (m)
					m->m_nextpkt = nextrecord;
				else
					so->so_rcv.sb_lastmbuf = NULL;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, MB_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0) {
			so->so_rcv.sb_mb = nextrecord;
			so->so_rcv.sb_lastmbuf = NULL;
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			so_pru_rcvd(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non-listening sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
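				/*
				 * sbreserve() charges the requested buffer
				 * size against the process's RLIMIT_SBSIZE
				 * limit; if the reservation cannot be made
				 * we return ENOBUFS rather than adjusting
				 * the size.
				 */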
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
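	/*
	 * Worked example (illustrative numbers only): for a 4-byte integer
	 * option and a caller-supplied 16-byte buffer (sopt_valsize == 16),
	 * valsize becomes 4 and sopt->sopt_valsize is set to 4, telling the
	 * caller how much was actually copied; a buffer smaller than the
	 * option value yields a copy silently truncated to the buffer size.
	 */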
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK);
			bzero(afap, sizeof(*afap));
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
		integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

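	/*
	 * Allocate additional mbufs (clusters when more than MLEN is still
	 * outstanding) until the whole option value fits in the chain; any
	 * allocation failure frees the partial chain and returns ENOBUFS.
	 */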
	while (sopt_size) {
		MGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* enough space should have been allocated in soopt_getm() */
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return(EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}

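/*
 * kqueue attachment.  sokqfilter() below selects the filter implementation
 * from the event type and from whether the socket is listening.  A
 * hypothetical userland registration such as
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *
 * on a listening socket attaches solisten_filtops, so a triggered event
 * reports the completed-connection queue length (so_qlen) in kn_data.
 */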
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	s = splnet();
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	splx(s);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}