/*
 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_socket.c	7.26 (Berkeley) 02/19/91
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "kernel.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "time.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */

/*
 * Create a socket of the given type in the given domain and attach
 * the requested protocol.  On success the new socket is returned
 * through *aso.
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

/*
 * Bind the socket to the address in the mbuf "nam" by way of the
 * protocol's PRU_BIND request.
 */
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Mark the socket as willing to accept connections and set the
 * pending-connection queue limit (at most SOMAXCONN).
 */
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

/*
 * Free a socket once it has neither a protocol control block nor a
 * file descriptor reference; otherwise do nothing.
 */
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls, so->so_linger))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

/*
 * Accept a connection; the protocol fills in the peer's address
 * in the mbuf "nam".
 */
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Initiate a connection to the address in the mbuf "nam".
 */
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Connect two sockets to each other (used by socketpair).
 */
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Initiate disconnection of a connected socket.
 */
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd))
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (atomic && resid > so->so_snd.sb_hiwat ||
			    clen > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = min(MCLBYTES, resid);
#else
					if (top == 0) {
						len = min(MCLBYTES - max_hdr, resid);
						m->m_data += max_hdr;
					} else
						len = min(MCLBYTES, resid);
#endif
					space -= MCLBYTES;
				} else {
nopages:
					len = min(min(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
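
#ifdef notdef
/*
 * Illustrative sketch only (never compiled): one way an in-kernel caller
 * might hand sosend() a prepackaged mbuf chain, as described in the
 * comment above.  With a null uio the data must already fit in "top",
 * and sosend() frees the chain on return.  The function name and the
 * single-mbuf limit here are hypothetical, not part of the original file.
 */
example_sosend_mbuf(so, data, len)
	struct socket *so;
	caddr_t data;
	int len;
{
	register struct mbuf *top;

	if (len > MHLEN)
		return (EMSGSIZE);	/* keep the sketch to a single mbuf */
	MGETHDR(top, M_WAIT, MT_DATA);
	top->m_pkthdr.len = top->m_len = len;
	top->m_pkthdr.rcvif = (struct ifnet *)0;
	bcopy(data, mtod(top, caddr_t), (unsigned)len);

	/* No destination address or control data; flags could add MSG_OOB. */
	return (sosend(so, (struct mbuf *)0, (struct uio *)0, top,
	    (struct mbuf *)0, 0));
}
#endif /* notdef */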

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	if (error = sblock(&so->so_rcv))
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	while (m == 0 || so->so_rcv.sb_cc < uio->uio_resid &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				break;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				break;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	u.u_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp)
			controlp = &(*controlp)->m_next;
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so)) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if (m = so->so_rcv.sb_mb)
				nextrecord = m->m_nextpkt;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0,
			    (struct mbuf *)0);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

/*
 * Shut down part of a full-duplex connection: "how" selects further
 * receives (FREAD), sends (FWRITE), or both.
 */
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

/*
 * Flush and release all data queued for receive on a socket,
 * disposing of any rights (passed descriptors) it may carry.
 */
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

/*
 * Set a socket option.  Socket-level options are handled here;
 * others are passed to the protocol's ctloutput routine.
 */
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

/*
 * Get a socket option.  Socket-level options are handled here;
 * others are passed to the protocol's ctloutput routine.
 */
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;	/* leftover ticks to microseconds */
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

/*
 * Notify the owning process or process group, and any selecting
 * processes, that urgent (out-of-band) data has arrived.
 */
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}
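
#ifdef notdef
/*
 * Illustrative sketch only (never compiled): the overall calling sequence
 * an in-kernel user of these routines might follow to create, bind, and
 * tear down a datagram socket.  The real callers are the socket system
 * calls; the function name, port number, and abbreviated error handling
 * here are hypothetical, and the sketch assumes the Internet-domain
 * headers (struct sockaddr_in) are available.
 */
example_socket_use()
{
	struct socket *so;
	struct mbuf *nam;
	register struct sockaddr_in *sin;
	int error;

	/* Create a UDP socket in the Internet domain. */
	if (error = socreate(AF_INET, &so, SOCK_DGRAM, 0))
		return (error);

	/* Build the local address in an MT_SONAME mbuf, as sobind() expects. */
	nam = m_get(M_WAIT, MT_SONAME);
	nam->m_len = sizeof (struct sockaddr_in);
	sin = mtod(nam, struct sockaddr_in *);
	bzero((caddr_t)sin, sizeof (*sin));
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof (*sin);
	sin->sin_port = htons(9999);		/* hypothetical port */

	error = sobind(so, nam);
	m_freem(nam);
	if (error) {
		(void) soclose(so);
		return (error);
	}

	/*
	 * Data could now be moved with sosend() and soreceive(),
	 * and options adjusted with sosetopt()/sogetopt().
	 */
	return (soclose(so));
}
#endif /* notdef */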