/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_socket.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
        int dom;
        struct socket **aso;
        register int type;
        int proto;
{
        struct proc *p = curproc;               /* XXX */
        register struct protosw *prp;
        register struct socket *so;
        register int error;

        if (proto)
                prp = pffindproto(dom, proto, type);
        else
                prp = pffindtype(dom, type);
        if (prp == 0 || prp->pr_usrreq == 0)
                return (EPROTONOSUPPORT);
        if (prp->pr_type != type)
                return (EPROTOTYPE);
        MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
        bzero((caddr_t)so, sizeof(*so));
        so->so_type = type;
        if (p->p_ucred->cr_uid == 0)
                so->so_state = SS_PRIV;
        so->so_proto = prp;
        error =
            (*prp->pr_usrreq)(so, PRU_ATTACH,
                (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
        if (error) {
                so->so_state |= SS_NOFDREF;
                sofree(so);
                return (error);
        }
        *aso = so;
        return (0);
}

sobind(so, nam)
        struct socket *so;
        struct mbuf *nam;
{
        int s = splnet();
        int error;

        error =
            (*so->so_proto->pr_usrreq)(so, PRU_BIND,
                (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

solisten(so, backlog)
        register struct socket *so;
        int backlog;
{
        int s = splnet(), error;

        error =
            (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
                (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
        if (error) {
                splx(s);
                return (error);
        }
        if (so->so_q == 0)
                so->so_options |= SO_ACCEPTCONN;
        if (backlog < 0)
                backlog = 0;
        so->so_qlimit = min(backlog, SOMAXCONN);
        splx(s);
        return (0);
}

sofree(so)
        register struct socket *so;
{

        if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
                return;
        if (so->so_head) {
                if (!soqremque(so, 0) && !soqremque(so, 1))
                        panic("sofree dq");
                so->so_head = 0;
        }
        sbrelease(&so->so_snd);
        sorflush(so);
        FREE(so, M_SOCKET);
}
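
/*
 * Illustrative sketch (not part of the original file): the typical
 * sequence by which the system-call layer drives the routines above
 * to set up a passive (listening) socket.  Error unwinding and the
 * construction of the "nam" mbuf (which holds a struct sockaddr) are
 * omitted; the exact calling conventions live in uipc_syscalls.c.
 */
#ifdef notdef
        struct socket *so;
        struct mbuf *nam;       /* assumed to hold the local address */
        int error;

        if (error = socreate(AF_INET, &so, SOCK_STREAM, 0))
                return (error);
        if (error = sobind(so, nam))
                return (error);
        return (solisten(so, 5));
#endif /* notdef */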

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
        register struct socket *so;
{
        int s = splnet();               /* conservative */
        int error = 0;

        if (so->so_options & SO_ACCEPTCONN) {
                while (so->so_q0)
                        (void) soabort(so->so_q0);
                while (so->so_q)
                        (void) soabort(so->so_q);
        }
        if (so->so_pcb == 0)
                goto discard;
        if (so->so_state & SS_ISCONNECTED) {
                if ((so->so_state & SS_ISDISCONNECTING) == 0) {
                        error = sodisconnect(so);
                        if (error)
                                goto drop;
                }
                if (so->so_options & SO_LINGER) {
                        if ((so->so_state & SS_ISDISCONNECTING) &&
                            (so->so_state & SS_NBIO))
                                goto drop;
                        while (so->so_state & SS_ISCONNECTED)
                                if (error = tsleep((caddr_t)&so->so_timeo,
                                    PSOCK | PCATCH, netcls, so->so_linger))
                                        break;
                }
        }
drop:
        if (so->so_pcb) {
                int error2 =
                    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
                        (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
                if (error == 0)
                        error = error2;
        }
discard:
        if (so->so_state & SS_NOFDREF)
                panic("soclose: NOFDREF");
        so->so_state |= SS_NOFDREF;
        sofree(so);
        splx(s);
        return (error);
}
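
/*
 * Note on the linger wait in soclose(): the tsleep() above blocks on
 * &so->so_timeo and relies on the protocol layer to issue the matching
 * wakeup() once the disconnect completes (see soisdisconnected() in
 * uipc_socket2.c).  so_linger is passed to tsleep() directly as the
 * timeout argument, i.e. it is interpreted in clock ticks here.
 */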

/*
 * Must be called at splnet...
 */
soabort(so)
        struct socket *so;
{

        return (
            (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
                (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
        register struct socket *so;
        struct mbuf *nam;
{
        int s = splnet();
        int error;

        if ((so->so_state & SS_NOFDREF) == 0)
                panic("soaccept: !NOFDREF");
        so->so_state &= ~SS_NOFDREF;
        error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
            (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

soconnect(so, nam)
        register struct socket *so;
        struct mbuf *nam;
{
        int s;
        int error;

        if (so->so_options & SO_ACCEPTCONN)
                return (EOPNOTSUPP);
        s = splnet();
        /*
         * If protocol is connection-based, can only connect once.
         * Otherwise, if connected, try to disconnect first.
         * This allows user to disconnect by connecting to, e.g.,
         * a null address.
         */
        if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
            ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
            (error = sodisconnect(so))))
                error = EISCONN;
        else
                error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
                    (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

soconnect2(so1, so2)
        register struct socket *so1;
        struct socket *so2;
{
        int s = splnet();
        int error;

        error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
            (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
        splx(s);
        return (error);
}

sodisconnect(so)
        register struct socket *so;
{
        int s = splnet();
        int error;

        if ((so->so_state & SS_ISCONNECTED) == 0) {
                error = ENOTCONN;
                goto bad;
        }
        if (so->so_state & SS_ISDISCONNECTING) {
                error = EALREADY;
                goto bad;
        }
        error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
            (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
        splx(s);
        return (error);
}

#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
sosend(so, addr, uio, top, control, flags)
        register struct socket *so;
        struct mbuf *addr;
        struct uio *uio;
        struct mbuf *top;
        struct mbuf *control;
        int flags;
{
        struct proc *p = curproc;               /* XXX */
        struct mbuf **mp;
        register struct mbuf *m;
        register long space, len, resid;
        int clen = 0, error, s, dontroute, mlen;
        int atomic = sosendallatonce(so) || top;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = top->m_pkthdr.len;
        dontroute =
            (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
            (so->so_proto->pr_flags & PR_ATOMIC);
        p->p_stats->p_ru.ru_msgsnd++;
        if (control)
                clen = control->m_len;
#define snderr(errno)   { error = errno; splx(s); goto release; }

restart:
        if (error = sblock(&so->so_snd, SBLOCKWAIT(flags)))
                goto out;
        do {
                s = splnet();
                if (so->so_state & SS_CANTSENDMORE)
                        snderr(EPIPE);
                if (so->so_error)
                        snderr(so->so_error);
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
                                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                                    !(resid == 0 && clen != 0))
                                        snderr(ENOTCONN);
                        } else if (addr == 0)
                                snderr(EDESTADDRREQ);
                }
                space = sbspace(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if (atomic && resid > so->so_snd.sb_hiwat ||
                    clen > so->so_snd.sb_hiwat)
                        snderr(EMSGSIZE);
                if (space < resid + clen && uio &&
                    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
                        if (so->so_state & SS_NBIO)
                                snderr(EWOULDBLOCK);
                        sbunlock(&so->so_snd);
                        error = sbwait(&so->so_snd);
                        splx(s);
                        if (error)
                                goto out;
                        goto restart;
                }
                splx(s);
                mp = &top;
                space -= clen;
                do {
                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                                if (flags & MSG_EOR)
                                        top->m_flags |= M_EOR;
                        } else do {
                                if (top == 0) {
                                        MGETHDR(m, M_WAIT, MT_DATA);
                                        mlen = MHLEN;
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                                } else {
                                        MGET(m, M_WAIT, MT_DATA);
                                        mlen = MLEN;
                                }
                                if (resid >= MINCLSIZE && space >= MCLBYTES) {
                                        MCLGET(m, M_WAIT);
                                        if ((m->m_flags & M_EXT) == 0)
                                                goto nopages;
                                        mlen = MCLBYTES;
#ifdef MAPPED_MBUFS
                                        len = min(MCLBYTES, resid);
#else
                                        if (atomic && top == 0) {
                                                len = min(MCLBYTES - max_hdr, resid);
                                                m->m_data += max_hdr;
                                        } else
                                                len = min(MCLBYTES, resid);
#endif
                                        space -= MCLBYTES;
                                } else {
nopages:
                                        len = min(min(mlen, resid), space);
                                        space -= len;
                                        /*
                                         * For datagram protocols, leave room
                                         * for protocol headers in first mbuf.
                                         */
                                        if (atomic && top == 0 && len < mlen)
                                                MH_ALIGN(m, len);
                                }
                                error = uiomove(mtod(m, caddr_t), (int)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid <= 0) {
                                        if (flags & MSG_EOR)
                                                top->m_flags |= M_EOR;
                                        break;
                                }
                        } while (space > 0 && atomic);
                        if (dontroute)
                                so->so_options |= SO_DONTROUTE;
                        s = splnet();                           /* XXX */
                        error = (*so->so_proto->pr_usrreq)(so,
                            (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
                            top, addr, control);
                        splx(s);
                        if (dontroute)
                                so->so_options &= ~SO_DONTROUTE;
                        clen = 0;
                        control = 0;
                        top = 0;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        sbunlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
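
/*
 * For reference (not part of the original file): the ordinary write
 * path in sys_socket.c calls sosend() with no address, no preformed
 * mbuf chain, no control data and no flags, roughly:
 *
 *      sosend(so, (struct mbuf *)0, uio,
 *          (struct mbuf *)0, (struct mbuf *)0, 0);
 *
 * sendto()/sendmsg() instead supply "addr", "control" and "flags"
 * built by the system-call layer in uipc_syscalls.c.
 */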

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
soreceive(so, paddr, uio, mp0, controlp, flagsp)
        register struct socket *so;
        struct mbuf **paddr;
        struct uio *uio;
        struct mbuf **mp0;
        struct mbuf **controlp;
        int *flagsp;
{
        register struct mbuf *m, **mp;
        register int flags, len, error, s, offset;
        struct protosw *pr = so->so_proto;
        struct mbuf *nextrecord;
        int moff, type;
        int orig_resid = uio->uio_resid;

        mp = mp0;
        if (paddr)
                *paddr = 0;
        if (controlp)
                *controlp = 0;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(M_WAIT, MT_DATA);
                error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
                    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
                if (error)
                        goto bad;
                do {
                        error = uiomove(mtod(m, caddr_t),
                            (int) min(uio->uio_resid, m->m_len), uio);
                        m = m_free(m);
                } while (uio->uio_resid && error == 0 && m);
bad:
                if (m)
                        m_freem(m);
                return (error);
        }
        if (mp)
                *mp = (struct mbuf *)0;
        if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
                (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                    (struct mbuf *)0, (struct mbuf *)0);

restart:
        if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags)))
                return (error);
        s = splnet();

        m = so->so_rcv.sb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *      receive operation at once if we block (resid <= hiwat), and
         *   3. MSG_DONTWAIT is not set.
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == 0 || ((flags & MSG_DONTWAIT) == 0 &&
            so->so_rcv.sb_cc < uio->uio_resid) &&
            (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
            ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
            m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0) {
#ifdef DIAGNOSTIC
                if (m == 0 && so->so_rcv.sb_cc)
                        panic("receive 1");
#endif
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                for (; m; m = m->m_next)
                        if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                                m = so->so_rcv.sb_mb;
                                goto dontblock;
                        }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (uio->uio_resid == 0)
                        goto release;
                if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                sbunlock(&so->so_rcv);
                error = sbwait(&so->so_rcv);
                splx(s);
                if (error)
                        return (error);
                goto restart;
        }
dontblock:
        if (uio->uio_procp)
                uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
        nextrecord = m->m_nextpkt;
        if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
                if (m->m_type != MT_SONAME)
                        panic("receive 1a");
#endif
                orig_resid = 0;
                if (flags & MSG_PEEK) {
                        if (paddr)
                                *paddr = m_copy(m, 0, m->m_len);
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        if (paddr) {
                                *paddr = m;
                                so->so_rcv.sb_mb = m->m_next;
                                m->m_next = 0;
                                m = so->so_rcv.sb_mb;
                        } else {
                                MFREE(m, so->so_rcv.sb_mb);
                                m = so->so_rcv.sb_mb;
                        }
                }
        }
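        /*
         * Peel off any ancillary-data (MT_CONTROL) mbufs that follow
         * the address.  For access-rights messages (SCM_RIGHTS) the
         * domain's dom_externalize hook turns the in-transit file
         * references into real descriptors in the receiving process;
         * for the unix domain this is unp_externalize() in
         * uipc_usrreq.c.
         */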
        while (m && m->m_type == MT_CONTROL && error == 0) {
                if (flags & MSG_PEEK) {
                        if (controlp)
                                *controlp = m_copy(m, 0, m->m_len);
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        if (controlp) {
                                if (pr->pr_domain->dom_externalize &&
                                    mtod(m, struct cmsghdr *)->cmsg_type ==
                                    SCM_RIGHTS)
                                        error = (*pr->pr_domain->dom_externalize)(m);
                                *controlp = m;
                                so->so_rcv.sb_mb = m->m_next;
                                m->m_next = 0;
                                m = so->so_rcv.sb_mb;
                        } else {
                                MFREE(m, so->so_rcv.sb_mb);
                                m = so->so_rcv.sb_mb;
                        }
                }
                if (controlp) {
                        orig_resid = 0;
                        controlp = &(*controlp)->m_next;
                }
        }
        if (m) {
                if ((flags & MSG_PEEK) == 0)
                        m->m_nextpkt = nextrecord;
                type = m->m_type;
                if (type == MT_OOBDATA)
                        flags |= MSG_OOB;
        }
        moff = 0;
        offset = 0;
        while (m && uio->uio_resid > 0 && error == 0) {
                if (m->m_type == MT_OOBDATA) {
                        if (type != MT_OOBDATA)
                                break;
                } else if (type == MT_OOBDATA)
                        break;
#ifdef DIAGNOSTIC
                else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
                        panic("receive 3");
#endif
                so->so_state &= ~SS_RCVATMARK;
                len = uio->uio_resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;
                /*
                 * If mp is set, just pass back the mbufs.
                 * Otherwise copy them out via the uio, then free.
                 * The sockbuf must be consistent (sb_mb points to the
                 * current record, m_nextpkt to the next one) while we
                 * drop priority; we must note any additions to the
                 * sockbuf when we block interrupts again.
                 */
                if (mp == 0) {
                        splx(s);
                        error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
                        s = splnet();
                } else
                        uio->uio_resid -= len;
                if (len == m->m_len - moff) {
                        if (m->m_flags & M_EOR)
                                flags |= MSG_EOR;
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                nextrecord = m->m_nextpkt;
                                sbfree(&so->so_rcv, m);
                                if (mp) {
                                        *mp = m;
                                        mp = &m->m_next;
                                        so->so_rcv.sb_mb = m = m->m_next;
                                        *mp = (struct mbuf *)0;
                                } else {
                                        MFREE(m, so->so_rcv.sb_mb);
                                        m = so->so_rcv.sb_mb;
                                }
                                if (m)
                                        m->m_nextpkt = nextrecord;
                        }
                } else {
                        if (flags & MSG_PEEK)
                                moff += len;
                        else {
                                if (mp)
                                        *mp = m_copym(m, 0, len, M_WAIT);
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.sb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        so->so_state |= SS_RCVATMARK;
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                if (flags & MSG_EOR)
                        break;
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until "uio->uio_resid == 0" or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep sockbuf locked against other readers.
                 */
                while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
                    !sosendallatonce(so) && !nextrecord) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        error = sbwait(&so->so_rcv);
                        if (error) {
                                sbunlock(&so->so_rcv);
                                splx(s);
                                return (0);
                        }
                        if (m = so->so_rcv.sb_mb)
                                nextrecord = m->m_nextpkt;
                }
        }

        if (m && pr->pr_flags & PR_ATOMIC) {
                flags |= MSG_TRUNC;
                if ((flags & MSG_PEEK) == 0)
                        (void) sbdroprecord(&so->so_rcv);
        }
        if ((flags & MSG_PEEK) == 0) {
                if (m == 0)
                        so->so_rcv.sb_mb = nextrecord;
                if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
                        (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                            (struct mbuf *)flags, (struct mbuf *)0);
        }
        if (orig_resid == uio->uio_resid && orig_resid &&
            (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
                sbunlock(&so->so_rcv);
                splx(s);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
release:
        sbunlock(&so->so_rcv);
        splx(s);
        return (error);
}
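
/*
 * For reference (not part of the original file): the ordinary read
 * path in sys_socket.c calls soreceive() with only the uio, roughly:
 *
 *      soreceive(so, (struct mbuf **)0, uio,
 *          (struct mbuf **)0, (struct mbuf **)0, (int *)0);
 *
 * recvfrom()/recvmsg() additionally pass "paddr", "controlp" and
 * "flagsp" so that the address, ancillary data and result flags can
 * be returned to the user.
 */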

soshutdown(so, how)
        register struct socket *so;
        register int how;
{
        register struct protosw *pr = so->so_proto;

        /*
         * Map the shutdown(2) argument (0, 1 or 2) onto FREAD and/or
         * FWRITE: after the increment, 1 = FREAD, 2 = FWRITE, 3 = both.
         */
        how++;
        if (how & FREAD)
                sorflush(so);
        if (how & FWRITE)
                return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
                    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
        return (0);
}

sorflush(so)
        register struct socket *so;
{
        register struct sockbuf *sb = &so->so_rcv;
        register struct protosw *pr = so->so_proto;
        register int s;
        struct sockbuf asb;

        sb->sb_flags |= SB_NOINTR;
        (void) sblock(sb, M_WAITOK);
        s = splimp();
        socantrcvmore(so);
        sbunlock(sb);
        asb = *sb;
        bzero((caddr_t)sb, sizeof (*sb));
        splx(s);
        if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
                (*pr->pr_domain->dom_dispose)(asb.sb_mb);
        sbrelease(&asb);
}
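
/*
 * Illustrative sketch (not part of the original file): setsockopt(2),
 * in uipc_syscalls.c, hands sosetopt() the option value already copied
 * into a single MT_SOOPTS mbuf, along the lines of:
 */
#ifdef notdef
        struct mbuf *m = m_get(M_WAIT, MT_SOOPTS);
        int optval = 1;
        int error;

        bcopy((caddr_t)&optval, mtod(m, caddr_t), sizeof (optval));
        m->m_len = sizeof (optval);
        error = sosetopt(so, SOL_SOCKET, SO_REUSEADDR, m);
#endif /* notdef */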

sosetopt(so, level, optname, m0)
        register struct socket *so;
        int level, optname;
        struct mbuf *m0;
{
        int error = 0;
        register struct mbuf *m = m0;

        if (level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput)
                        return ((*so->so_proto->pr_ctloutput)
                                  (PRCO_SETOPT, so, level, optname, &m0));
                error = ENOPROTOOPT;
        } else {
                switch (optname) {

                case SO_LINGER:
                        if (m == NULL || m->m_len != sizeof (struct linger)) {
                                error = EINVAL;
                                goto bad;
                        }
                        so->so_linger = mtod(m, struct linger *)->l_linger;
                        /* fall thru... */

                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_DONTROUTE:
                case SO_USELOOPBACK:
                case SO_BROADCAST:
                case SO_REUSEADDR:
                case SO_REUSEPORT:
                case SO_OOBINLINE:
                        if (m == NULL || m->m_len < sizeof (int)) {
                                error = EINVAL;
                                goto bad;
                        }
                        if (*mtod(m, int *))
                                so->so_options |= optname;
                        else
                                so->so_options &= ~optname;
                        break;

                case SO_SNDBUF:
                case SO_RCVBUF:
                case SO_SNDLOWAT:
                case SO_RCVLOWAT:
                        if (m == NULL || m->m_len < sizeof (int)) {
                                error = EINVAL;
                                goto bad;
                        }
                        switch (optname) {

                        case SO_SNDBUF:
                        case SO_RCVBUF:
                                if (sbreserve(optname == SO_SNDBUF ?
                                    &so->so_snd : &so->so_rcv,
                                    (u_long) *mtod(m, int *)) == 0) {
                                        error = ENOBUFS;
                                        goto bad;
                                }
                                break;

                        case SO_SNDLOWAT:
                                so->so_snd.sb_lowat = *mtod(m, int *);
                                break;
                        case SO_RCVLOWAT:
                                so->so_rcv.sb_lowat = *mtod(m, int *);
                                break;
                        }
                        break;

                case SO_SNDTIMEO:
                case SO_RCVTIMEO:
                    {
                        struct timeval *tv;
                        short val;

                        if (m == NULL || m->m_len < sizeof (*tv)) {
                                error = EINVAL;
                                goto bad;
                        }
                        tv = mtod(m, struct timeval *);
                        if (tv->tv_sec > SHRT_MAX / hz - hz) {
                                error = EDOM;
                                goto bad;
                        }
                        val = tv->tv_sec * hz + tv->tv_usec / tick;

                        switch (optname) {

                        case SO_SNDTIMEO:
                                so->so_snd.sb_timeo = val;
                                break;
                        case SO_RCVTIMEO:
                                so->so_rcv.sb_timeo = val;
                                break;
                        }
                        break;
                    }

                default:
                        error = ENOPROTOOPT;
                        break;
                }
                m = 0;
                if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
                        (void) ((*so->so_proto->pr_ctloutput)
                                  (PRCO_SETOPT, so, level, optname, &m0));
        }
bad:
        if (m)
                (void) m_free(m);
        return (error);
}
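
/*
 * Note (not part of the original file): sogetopt() allocates the mbuf
 * holding the result; the getsockopt(2) code in uipc_syscalls.c is
 * responsible for copying it out to the user and freeing it.
 */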

sogetopt(so, level, optname, mp)
        register struct socket *so;
        int level, optname;
        struct mbuf **mp;
{
        register struct mbuf *m;

        if (level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput) {
                        return ((*so->so_proto->pr_ctloutput)
                                  (PRCO_GETOPT, so, level, optname, mp));
                } else
                        return (ENOPROTOOPT);
        } else {
                m = m_get(M_WAIT, MT_SOOPTS);
                m->m_len = sizeof (int);

                switch (optname) {

                case SO_LINGER:
                        m->m_len = sizeof (struct linger);
                        mtod(m, struct linger *)->l_onoff =
                                so->so_options & SO_LINGER;
                        mtod(m, struct linger *)->l_linger = so->so_linger;
                        break;

                case SO_USELOOPBACK:
                case SO_DONTROUTE:
                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_REUSEADDR:
                case SO_REUSEPORT:
                case SO_BROADCAST:
                case SO_OOBINLINE:
                        *mtod(m, int *) = so->so_options & optname;
                        break;

                case SO_TYPE:
                        *mtod(m, int *) = so->so_type;
                        break;

                case SO_ERROR:
                        *mtod(m, int *) = so->so_error;
                        so->so_error = 0;
                        break;

                case SO_SNDBUF:
                        *mtod(m, int *) = so->so_snd.sb_hiwat;
                        break;

                case SO_RCVBUF:
                        *mtod(m, int *) = so->so_rcv.sb_hiwat;
                        break;

                case SO_SNDLOWAT:
                        *mtod(m, int *) = so->so_snd.sb_lowat;
                        break;

                case SO_RCVLOWAT:
                        *mtod(m, int *) = so->so_rcv.sb_lowat;
                        break;

                case SO_SNDTIMEO:
                case SO_RCVTIMEO:
                    {
                        int val = (optname == SO_SNDTIMEO ?
                             so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

                        m->m_len = sizeof(struct timeval);
                        mtod(m, struct timeval *)->tv_sec = val / hz;
                        /*
                         * Convert leftover ticks to microseconds; "tick"
                         * is the length of a clock tick in microseconds,
                         * making this the inverse of the conversion done
                         * in sosetopt() above.
                         */
                        mtod(m, struct timeval *)->tv_usec =
                            (val % hz) * tick;
                        break;
                    }

                default:
                        (void)m_free(m);
                        return (ENOPROTOOPT);
                }
                *mp = m;
                return (0);
        }
}

sohasoutofband(so)
        register struct socket *so;
{
        struct proc *p;

        if (so->so_pgid < 0)
                gsignal(-so->so_pgid, SIGURG);
        else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
                psignal(p, SIGURG);
        selwakeup(&so->so_rcv.sb_sel);
}