/*
 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_socket.c	7.31 (Berkeley) 11/19/91
 */

#include "param.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "kernel.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "resourcevar.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Create a new socket of the given type in domain "dom".
 * The protocol switch entry is located by protocol number when
 * "proto" is nonzero, otherwise by socket type.  The socket
 * structure is allocated, zeroed, and handed to the protocol's
 * PRU_ATTACH request; on success the socket is returned through
 * *aso, on failure the half-built socket is released via sofree().
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	struct proc *p = curproc;		/* XXX */
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	/* sockets created by the superuser are marked privileged */
	if (p->p_ucred->cr_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	/* the protocol number rides in the "nam" argument slot */
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		/*
		 * Set NOFDREF so sofree() knows no file table entry
		 * references this socket and will actually free it.
		 */
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

/*
 * Bind the socket to the local address in "nam" by passing the
 * request through to the protocol's PRU_BIND handler.
 */
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Mark the socket as willing to accept connections, with the
 * backlog limit clamped to [0, SOMAXCONN].  SO_ACCEPTCONN is only
 * set while the completed-connection queue is still empty.
 */
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

/*
 * Release a socket once both the protocol (no pcb) and the file
 * table (SS_NOFDREF set) have let go of it; otherwise a no-op.
 * Dequeues the socket from its listening head's incomplete (so_q0)
 * or completed (so_q) connection queue, frees both socket buffers,
 * then frees the socket structure itself.
 */
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		/* must be on exactly one of the two connection queues */
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	/* a listening socket first aborts every queued connection */
	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			/* non-blocking linger: don't wait for the drain */
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			/*
			 * Sleep (interruptibly) until the disconnect
			 * completes or the linger interval expires.
			 */
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls, so->so_linger))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		/* an earlier error takes precedence over detach status */
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

/*
 * Accept a connection whose file descriptor reference is being
 * handed over (SS_NOFDREF must be set on entry); the peer's
 * address is returned through "nam" via PRU_ACCEPT.
 */
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Initiate a connection to the address in "nam"; not permitted on
 * a listening socket.
 */
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Connect two sockets to each other (e.g. for socketpair) through
 * so1's protocol; so2 rides in the "nam" argument slot.
 */
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Begin disconnecting a connected socket; ENOTCONN if it is not
 * connected, EALREADY if a disconnect is already in progress.
 */
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* per-message route bypass only applies to atomic protocols */
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
/* error exit while at splnet holding the send buffer lock */
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd, SBLOCKWAIT(flags)))
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				/*
				 * A control-only send is permitted while
				 * a connection is being confirmed.
				 */
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		/* out-of-band data is allowed to exceed the limits a bit */
		if (flags & MSG_OOB)
			space += 1024;
		if (atomic && resid > so->so_snd.sb_hiwat ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			/* wait for the send buffer to drain, then retry */
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			/* first mbuf of a chain gets a packet header */
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE && space >= MCLBYTES) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
				len = min(MCLBYTES, resid);
#else
				/* leave header room in the leading cluster */
				if (top == 0) {
					len = min(MCLBYTES - max_hdr, resid);
					m->m_data += max_hdr;
				} else
					len = min(MCLBYTES, resid);
#endif
				space -= MCLBYTES;
			} else {
nopages:
				len = min(min(mlen, resid), space);
				space -= len;
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    /* chain and control now belong to the protocol */
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
442 */ 443 soreceive(so, paddr, uio, mp0, controlp, flagsp) 444 register struct socket *so; 445 struct mbuf **paddr; 446 struct uio *uio; 447 struct mbuf **mp0; 448 struct mbuf **controlp; 449 int *flagsp; 450 { 451 struct proc *p = curproc; /* XXX */ 452 register struct mbuf *m, **mp; 453 register int flags, len, error, s, offset; 454 struct protosw *pr = so->so_proto; 455 struct mbuf *nextrecord; 456 int moff, type; 457 458 mp = mp0; 459 if (paddr) 460 *paddr = 0; 461 if (controlp) 462 *controlp = 0; 463 if (flagsp) 464 flags = *flagsp &~ MSG_EOR; 465 else 466 flags = 0; 467 if (flags & MSG_OOB) { 468 m = m_get(M_WAIT, MT_DATA); 469 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, 470 m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0); 471 if (error) 472 goto bad; 473 do { 474 error = uiomove(mtod(m, caddr_t), 475 (int) min(uio->uio_resid, m->m_len), uio); 476 m = m_free(m); 477 } while (uio->uio_resid && error == 0 && m); 478 bad: 479 if (m) 480 m_freem(m); 481 return (error); 482 } 483 if (mp) 484 *mp = (struct mbuf *)0; 485 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) 486 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, 487 (struct mbuf *)0, (struct mbuf *)0); 488 489 restart: 490 if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) 491 return (error); 492 s = splnet(); 493 494 m = so->so_rcv.sb_mb; 495 /* 496 * If we have less data than requested, block awaiting more 497 * (subject to any timeout) if: 498 * 1. the current count is less than the low water mark, or 499 * 2. MSG_WAITALL is set, and it is possible to do the entire 500 * receive operation at once if we block (resid <= hiwat). 501 * 3. MSG_DONTWAIT is not set 502 * If MSG_WAITALL is set but resid is larger than the receive buffer, 503 * we have to do the receive in sections, and thus risk returning 504 * a short count if a timeout or signal occurs after we start. 
505 */ 506 if (m == 0 || ((flags & MSG_DONTWAIT) == 0 && 507 so->so_rcv.sb_cc < uio->uio_resid) && 508 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 509 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 510 m->m_nextpkt == 0) { 511 #ifdef DIAGNOSTIC 512 if (m == 0 && so->so_rcv.sb_cc) 513 panic("receive 1"); 514 #endif 515 if (so->so_error) { 516 if (m) 517 goto dontblock; 518 error = so->so_error; 519 if ((flags & MSG_PEEK) == 0) 520 so->so_error = 0; 521 goto release; 522 } 523 if (so->so_state & SS_CANTRCVMORE) { 524 if (m) 525 goto dontblock; 526 else 527 goto release; 528 } 529 for (; m; m = m->m_next) 530 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 531 m = so->so_rcv.sb_mb; 532 goto dontblock; 533 } 534 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 535 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 536 error = ENOTCONN; 537 goto release; 538 } 539 if (uio->uio_resid == 0) 540 goto release; 541 if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) { 542 error = EWOULDBLOCK; 543 goto release; 544 } 545 sbunlock(&so->so_rcv); 546 error = sbwait(&so->so_rcv); 547 splx(s); 548 if (error) 549 return (error); 550 goto restart; 551 } 552 dontblock: 553 p->p_stats->p_ru.ru_msgrcv++; 554 nextrecord = m->m_nextpkt; 555 if (pr->pr_flags & PR_ADDR) { 556 #ifdef DIAGNOSTIC 557 if (m->m_type != MT_SONAME) 558 panic("receive 1a"); 559 #endif 560 if (flags & MSG_PEEK) { 561 if (paddr) 562 *paddr = m_copy(m, 0, m->m_len); 563 m = m->m_next; 564 } else { 565 sbfree(&so->so_rcv, m); 566 if (paddr) { 567 *paddr = m; 568 so->so_rcv.sb_mb = m->m_next; 569 m->m_next = 0; 570 m = so->so_rcv.sb_mb; 571 } else { 572 MFREE(m, so->so_rcv.sb_mb); 573 m = so->so_rcv.sb_mb; 574 } 575 } 576 } 577 while (m && m->m_type == MT_CONTROL && error == 0) { 578 if (flags & MSG_PEEK) { 579 if (controlp) 580 *controlp = m_copy(m, 0, m->m_len); 581 m = m->m_next; 582 } else { 583 sbfree(&so->so_rcv, m); 584 if (controlp) { 585 if 
(pr->pr_domain->dom_externalize && 586 mtod(m, struct cmsghdr *)->cmsg_type == 587 SCM_RIGHTS) 588 error = (*pr->pr_domain->dom_externalize)(m); 589 *controlp = m; 590 so->so_rcv.sb_mb = m->m_next; 591 m->m_next = 0; 592 m = so->so_rcv.sb_mb; 593 } else { 594 MFREE(m, so->so_rcv.sb_mb); 595 m = so->so_rcv.sb_mb; 596 } 597 } 598 if (controlp) 599 controlp = &(*controlp)->m_next; 600 } 601 if (m) { 602 if ((flags & MSG_PEEK) == 0) 603 m->m_nextpkt = nextrecord; 604 type = m->m_type; 605 if (type == MT_OOBDATA) 606 flags |= MSG_OOB; 607 } 608 moff = 0; 609 offset = 0; 610 while (m && uio->uio_resid > 0 && error == 0) { 611 if (m->m_type == MT_OOBDATA) { 612 if (type != MT_OOBDATA) 613 break; 614 } else if (type == MT_OOBDATA) 615 break; 616 #ifdef DIAGNOSTIC 617 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) 618 panic("receive 3"); 619 #endif 620 so->so_state &= ~SS_RCVATMARK; 621 len = uio->uio_resid; 622 if (so->so_oobmark && len > so->so_oobmark - offset) 623 len = so->so_oobmark - offset; 624 if (len > m->m_len - moff) 625 len = m->m_len - moff; 626 /* 627 * If mp is set, just pass back the mbufs. 628 * Otherwise copy them out via the uio, then free. 629 * Sockbuf must be consistent here (points to current mbuf, 630 * it points to next record) when we drop priority; 631 * we must note any additions to the sockbuf when we 632 * block interrupts again. 
633 */ 634 if (mp == 0) { 635 splx(s); 636 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); 637 s = splnet(); 638 } else 639 uio->uio_resid -= len; 640 if (len == m->m_len - moff) { 641 if (m->m_flags & M_EOR) 642 flags |= MSG_EOR; 643 if (flags & MSG_PEEK) { 644 m = m->m_next; 645 moff = 0; 646 } else { 647 nextrecord = m->m_nextpkt; 648 sbfree(&so->so_rcv, m); 649 if (mp) { 650 *mp = m; 651 mp = &m->m_next; 652 so->so_rcv.sb_mb = m = m->m_next; 653 *mp = (struct mbuf *)0; 654 } else { 655 MFREE(m, so->so_rcv.sb_mb); 656 m = so->so_rcv.sb_mb; 657 } 658 if (m) 659 m->m_nextpkt = nextrecord; 660 } 661 } else { 662 if (flags & MSG_PEEK) 663 moff += len; 664 else { 665 if (mp) 666 *mp = m_copym(m, 0, len, M_WAIT); 667 m->m_data += len; 668 m->m_len -= len; 669 so->so_rcv.sb_cc -= len; 670 } 671 } 672 if (so->so_oobmark) { 673 if ((flags & MSG_PEEK) == 0) { 674 so->so_oobmark -= len; 675 if (so->so_oobmark == 0) { 676 so->so_state |= SS_RCVATMARK; 677 break; 678 } 679 } else 680 offset += len; 681 } 682 if (flags & MSG_EOR) 683 break; 684 /* 685 * If the MSG_WAITALL flag is set (for non-atomic socket), 686 * we must not quit until "uio->uio_resid == 0" or an error 687 * termination. If a signal/timeout occurs, return 688 * with a short count but without error. 689 * Keep sockbuf locked against other readers. 
690 */ 691 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 && 692 !sosendallatonce(so)) { 693 if (so->so_error || so->so_state & SS_CANTRCVMORE) 694 break; 695 error = sbwait(&so->so_rcv); 696 if (error) { 697 sbunlock(&so->so_rcv); 698 splx(s); 699 return (0); 700 } 701 if (m = so->so_rcv.sb_mb) 702 nextrecord = m->m_nextpkt; 703 } 704 } 705 if ((flags & MSG_PEEK) == 0) { 706 if (m == 0) 707 so->so_rcv.sb_mb = nextrecord; 708 else if (pr->pr_flags & PR_ATOMIC) { 709 flags |= MSG_TRUNC; 710 (void) sbdroprecord(&so->so_rcv); 711 } 712 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 713 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, 714 (struct mbuf *)flags, (struct mbuf *)0, 715 (struct mbuf *)0); 716 } 717 if (flagsp) 718 *flagsp |= flags; 719 release: 720 sbunlock(&so->so_rcv); 721 splx(s); 722 return (error); 723 } 724 725 soshutdown(so, how) 726 register struct socket *so; 727 register int how; 728 { 729 register struct protosw *pr = so->so_proto; 730 731 how++; 732 if (how & FREAD) 733 sorflush(so); 734 if (how & FWRITE) 735 return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN, 736 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)); 737 return (0); 738 } 739 740 sorflush(so) 741 register struct socket *so; 742 { 743 register struct sockbuf *sb = &so->so_rcv; 744 register struct protosw *pr = so->so_proto; 745 register int s; 746 struct sockbuf asb; 747 748 sb->sb_flags |= SB_NOINTR; 749 (void) sblock(sb, M_WAITOK); 750 s = splimp(); 751 socantrcvmore(so); 752 sbunlock(sb); 753 asb = *sb; 754 bzero((caddr_t)sb, sizeof (*sb)); 755 splx(s); 756 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) 757 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 758 sbrelease(&asb); 759 } 760 761 sosetopt(so, level, optname, m0) 762 register struct socket *so; 763 int level, optname; 764 struct mbuf *m0; 765 { 766 int error = 0; 767 register struct mbuf *m = m0; 768 769 if (level != SOL_SOCKET) { 770 if (so->so_proto && so->so_proto->pr_ctloutput) 771 return 
((*so->so_proto->pr_ctloutput) 772 (PRCO_SETOPT, so, level, optname, &m0)); 773 error = ENOPROTOOPT; 774 } else { 775 switch (optname) { 776 777 case SO_LINGER: 778 if (m == NULL || m->m_len != sizeof (struct linger)) { 779 error = EINVAL; 780 goto bad; 781 } 782 so->so_linger = mtod(m, struct linger *)->l_linger; 783 /* fall thru... */ 784 785 case SO_DEBUG: 786 case SO_KEEPALIVE: 787 case SO_DONTROUTE: 788 case SO_USELOOPBACK: 789 case SO_BROADCAST: 790 case SO_REUSEADDR: 791 case SO_OOBINLINE: 792 if (m == NULL || m->m_len < sizeof (int)) { 793 error = EINVAL; 794 goto bad; 795 } 796 if (*mtod(m, int *)) 797 so->so_options |= optname; 798 else 799 so->so_options &= ~optname; 800 break; 801 802 case SO_SNDBUF: 803 case SO_RCVBUF: 804 case SO_SNDLOWAT: 805 case SO_RCVLOWAT: 806 if (m == NULL || m->m_len < sizeof (int)) { 807 error = EINVAL; 808 goto bad; 809 } 810 switch (optname) { 811 812 case SO_SNDBUF: 813 case SO_RCVBUF: 814 if (sbreserve(optname == SO_SNDBUF ? 815 &so->so_snd : &so->so_rcv, 816 (u_long) *mtod(m, int *)) == 0) { 817 error = ENOBUFS; 818 goto bad; 819 } 820 break; 821 822 case SO_SNDLOWAT: 823 so->so_snd.sb_lowat = *mtod(m, int *); 824 break; 825 case SO_RCVLOWAT: 826 so->so_rcv.sb_lowat = *mtod(m, int *); 827 break; 828 } 829 break; 830 831 case SO_SNDTIMEO: 832 case SO_RCVTIMEO: 833 { 834 struct timeval *tv; 835 short val; 836 837 if (m == NULL || m->m_len < sizeof (*tv)) { 838 error = EINVAL; 839 goto bad; 840 } 841 tv = mtod(m, struct timeval *); 842 if (tv->tv_sec > SHRT_MAX / hz - hz) { 843 error = EDOM; 844 goto bad; 845 } 846 val = tv->tv_sec * hz + tv->tv_usec / tick; 847 848 switch (optname) { 849 850 case SO_SNDTIMEO: 851 so->so_snd.sb_timeo = val; 852 break; 853 case SO_RCVTIMEO: 854 so->so_rcv.sb_timeo = val; 855 break; 856 } 857 break; 858 } 859 860 default: 861 error = ENOPROTOOPT; 862 break; 863 } 864 m = 0; 865 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) 866 (void) ((*so->so_proto->pr_ctloutput) 867 
(PRCO_SETOPT, so, level, optname, &m0)); 868 } 869 bad: 870 if (m) 871 (void) m_free(m); 872 return (error); 873 } 874 875 sogetopt(so, level, optname, mp) 876 register struct socket *so; 877 int level, optname; 878 struct mbuf **mp; 879 { 880 register struct mbuf *m; 881 882 if (level != SOL_SOCKET) { 883 if (so->so_proto && so->so_proto->pr_ctloutput) { 884 return ((*so->so_proto->pr_ctloutput) 885 (PRCO_GETOPT, so, level, optname, mp)); 886 } else 887 return (ENOPROTOOPT); 888 } else { 889 m = m_get(M_WAIT, MT_SOOPTS); 890 m->m_len = sizeof (int); 891 892 switch (optname) { 893 894 case SO_LINGER: 895 m->m_len = sizeof (struct linger); 896 mtod(m, struct linger *)->l_onoff = 897 so->so_options & SO_LINGER; 898 mtod(m, struct linger *)->l_linger = so->so_linger; 899 break; 900 901 case SO_USELOOPBACK: 902 case SO_DONTROUTE: 903 case SO_DEBUG: 904 case SO_KEEPALIVE: 905 case SO_REUSEADDR: 906 case SO_BROADCAST: 907 case SO_OOBINLINE: 908 *mtod(m, int *) = so->so_options & optname; 909 break; 910 911 case SO_TYPE: 912 *mtod(m, int *) = so->so_type; 913 break; 914 915 case SO_ERROR: 916 *mtod(m, int *) = so->so_error; 917 so->so_error = 0; 918 break; 919 920 case SO_SNDBUF: 921 *mtod(m, int *) = so->so_snd.sb_hiwat; 922 break; 923 924 case SO_RCVBUF: 925 *mtod(m, int *) = so->so_rcv.sb_hiwat; 926 break; 927 928 case SO_SNDLOWAT: 929 *mtod(m, int *) = so->so_snd.sb_lowat; 930 break; 931 932 case SO_RCVLOWAT: 933 *mtod(m, int *) = so->so_rcv.sb_lowat; 934 break; 935 936 case SO_SNDTIMEO: 937 case SO_RCVTIMEO: 938 { 939 int val = (optname == SO_SNDTIMEO ? 
940 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 941 942 m->m_len = sizeof(struct timeval); 943 mtod(m, struct timeval *)->tv_sec = val / hz; 944 mtod(m, struct timeval *)->tv_usec = 945 (val % hz) / tick; 946 break; 947 } 948 949 default: 950 (void)m_free(m); 951 return (ENOPROTOOPT); 952 } 953 *mp = m; 954 return (0); 955 } 956 } 957 958 sohasoutofband(so) 959 register struct socket *so; 960 { 961 struct proc *p; 962 963 if (so->so_pgid < 0) 964 gsignal(-so->so_pgid, SIGURG); 965 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) 966 psignal(p, SIGURG); 967 if (so->so_rcv.sb_sel) { 968 selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL); 969 so->so_rcv.sb_sel = 0; 970 so->so_rcv.sb_flags &= ~SB_COLL; 971 } 972 } 973