/*	$OpenBSD: uipc_usrreq.c,v 1.50 2011/04/04 12:44:10 deraadt Exp $	*/
/*	$NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>

/*
 * Unix communications domain.
 *
 * TODO:
 *	SEQPACKET, RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
struct	sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };
ino_t	unp_ino;			/* prototype for fake inode numbers */

/*ARGSUSED*/
int
uipc_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
    struct mbuf *control, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	int error = 0;

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);
	if (req != PRU_SEND && control && control->m_len) {
		error = EOPNOTSUPP;
		goto release;
	}
	if (unp == NULL && req != PRU_ATTACH) {
		error = EINVAL;
		goto release;
	}
	switch (req) {

	case PRU_ATTACH:
		if (unp) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		error = unp_bind(unp, nam, p);
		break;

	case PRU_LISTEN:
		if (unp->unp_vnode == NULL)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		error = unp_connect(so, nam, p);
		break;

	case PRU_CONNECT2:
		error = unp_connect2(so, (struct socket *)nam);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else {
			nam->m_len = sizeof(sun_noname);
			*(mtod(nam, struct sockaddr *)) = sun_noname;
		}
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define	snd (&so2->so_snd)
			if (unp->unp_conn == NULL)
				break;
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Adjust backpressure on sender
			 * and wakeup any waiting to write.
			 */
			snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
			unp->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat += unp->unp_cc - rcv->sb_cc;
			unp->unp_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		if (control && (error = unp_internalize(control, p)))
			break;
		switch (so->so_type) {

		case SOCK_DGRAM: {
			struct sockaddr *from;

			if (nam) {
				if (unp->unp_conn) {
					error = EISCONN;
					break;
				}
				error = unp_connect(so, nam, p);
				if (error)
					break;
			} else {
				if (unp->unp_conn == NULL) {
					error = ENOTCONN;
					break;
				}
			}
			so2 = unp->unp_conn->unp_socket;
			if (unp->unp_addr)
				from = mtod(unp->unp_addr, struct sockaddr *);
			else
				from = &sun_noname;
			if (sbappendaddr(&so2->so_rcv, from, m, control)) {
				sorwakeup(so2);
				m = NULL;
				control = NULL;
			} else
				error = ENOBUFS;
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				break;
			}
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Send to paired receive port, and then reduce
			 * send buffer hiwater marks to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control))
					control = NULL;
			} else
				sbappend(rcv, m);
			snd->sb_mbmax -=
			    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
			unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat -= rcv->sb_cc - unp->unp_conn->unp_cc;
			unp->unp_conn->unp_cc = rcv->sb_cc;
			sorwakeup(so2);
			m = NULL;
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 4");
		}
		/* we need to undo unp_internalize in case of errors */
		if (control && error)
			unp_dispose(control);
		break;

	case PRU_ABORT:
		unp_drop(unp, ECONNABORTED);
		break;

	case PRU_SENSE:
		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
		if (so->so_type == SOCK_STREAM && unp->unp_conn != NULL) {
			so2 = unp->unp_conn->unp_socket;
			((struct stat *) m)->st_blksize += so2->so_rcv.sb_cc;
		}
		((struct stat *) m)->st_dev = NODEV;
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		((struct stat *) m)->st_atim =
		    ((struct stat *) m)->st_mtim =
		    ((struct stat *) m)->st_ctim = unp->unp_ctime;
		((struct stat *) m)->st_ino = unp->unp_ino;
		return (0);

	case PRU_RCVOOB:
		return (EOPNOTSUPP);

	case PRU_SENDOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		if (unp->unp_addr) {
			nam->m_len = unp->unp_addr->m_len;
			bcopy(mtod(unp->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_PEERADDR:
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_SLOWTIMO:
		break;

	default:
		panic("piusrreq");
	}
release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return (error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;

int	unp_rights;			/* file descriptors in flight */

int
unp_attach(struct socket *so)
{
	struct unpcb *unp;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = malloc(sizeof(*unp), M_PCB, M_NOWAIT|M_ZERO);
	if (unp == NULL)
		return (ENOBUFS);
	unp->unp_socket = so;
	so->so_pcb = unp;
	getnanotime(&unp->unp_ctime);
	return (0);
}

void
unp_detach(struct unpcb *unp)
{

	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vrele(unp->unp_vnode);
		unp->unp_vnode = NULL;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (unp->unp_refs)
		unp_drop(unp->unp_refs, ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = NULL;
	m_freem(unp->unp_addr);
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		free(unp, M_PCB);
		unp_gc();
	} else
		free(unp, M_PCB);
}

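/*
 * Bind a filesystem name to a socket.  The path carried in the mbuf is
 * looked up and created as a VSOCK vnode; if the name already exists the
 * bind fails with EADDRINUSE.  The binder's uid, gid and pid are recorded
 * in unp_connid and UNP_FEIDSBIND is set so unp_connect() can hand those
 * credentials to a connecting peer.
 */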
int
unp_bind(struct unpcb *unp, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	char buf[MLEN];

	if (unp->unp_vnode != NULL)
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0 || namelen >= MLEN)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE, buf, p);
/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		return (EADDRINUSE);
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	if (error)
		return (error);
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addr = m_copy(nam, 0, (int)M_COPYALL);
	unp->unp_connid.uid = p->p_ucred->cr_uid;
	unp->unp_connid.gid = p->p_ucred->cr_gid;
	unp->unp_connid.pid = p->p_p->ps_mainproc->p_pid;
	unp->unp_flags |= UNP_FEIDSBIND;
	VOP_UNLOCK(vp, 0, p);
	return (0);
}

int
unp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	struct vnode *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	if (nam->m_data + nam->m_len == &nam->m_dat[MLEN]) {	/* XXX */
		if (*(mtod(nam, caddr_t) + nam->m_len - 1) != 0)
			return (EMSGSIZE);
	} else
		*(mtod(nam, caddr_t) + nam->m_len) = 0;
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == NULL) {
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp = sotounpcb(so);
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr =
			    m_copy(unp2->unp_addr, 0, (int)M_COPYALL);
		unp3->unp_connid.uid = p->p_ucred->cr_uid;
		unp3->unp_connid.gid = p->p_ucred->cr_gid;
		unp3->unp_connid.pid = p->p_p->ps_mainproc->p_pid;
		unp3->unp_flags |= UNP_FEIDS;
		so2 = so3;
		if (unp2->unp_flags & UNP_FEIDSBIND) {
			unp->unp_connid = unp2->unp_connid;
			unp->unp_flags |= UNP_FEIDS;
		}
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

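/*
 * Splice two sockets together at the pcb level.  For datagram sockets
 * the connecting pcb is linked onto the peer's unp_refs list and only
 * the caller is marked connected; for stream sockets the two pcbs point
 * at each other and both ends are marked connected.
 */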
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

void
unp_disconnect(struct unpcb *unp)
{
	struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == NULL)
		return;
	unp->unp_conn = NULL;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			unp2 = unp2->unp_refs;
			for (;;) {
				if (unp2 == NULL)
					panic("unp_disconnect");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = NULL;
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = NULL;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
unp_abort(struct unpcb *unp)
{
	unp_detach(unp);
}
#endif

void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
	    (so = unp->unp_conn->unp_socket))
		socantrcvmore(so);
}

void
unp_drop(struct unpcb *unp, int errno)
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = NULL;
		sofree(so);
		m_freem(unp->unp_addr);
		free(unp, M_PCB);
	}
}

#ifdef notdef
unp_drain(void)
{

}
#endif

int
unp_externalize(struct mbuf *rights, socklen_t controllen)
{
	struct proc *p = curproc;		/* XXX */
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int i, *fdp;
	struct file **rp;
	struct file *fp;
	int nfds, error = 0;

	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct file *);
	if (controllen < CMSG_ALIGN(sizeof(struct cmsghdr)))
		controllen = 0;
	else
		controllen -= CMSG_ALIGN(sizeof(struct cmsghdr));
	if (nfds > controllen / sizeof(int))
		nfds = controllen / sizeof(int);

	rp = (struct file **)CMSG_DATA(cm);

	fdp = malloc(nfds * sizeof(int), M_TEMP, M_WAITOK);

	/* Make sure the recipient should be able to see the descriptors.. */
	if (p->p_fd->fd_rdir != NULL) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < nfds; i++) {
			fp = *rp++;
			/*
			 * No to block devices.  If passing a directory,
			 * make sure that it is underneath the root.
			 */
			if (fp->f_type == DTYPE_VNODE) {
				struct vnode *vp = (struct vnode *)fp->f_data;

				if (vp->v_type == VBLK ||
				    (vp->v_type == VDIR &&
				    !vn_isunder(vp, p->p_fd->fd_rdir, p))) {
					error = EPERM;
					break;
				}
			}
		}
	}

restart:
	fdplock(p->p_fd);
	if (error != 0) {
		rp = ((struct file **)CMSG_DATA(cm));
		for (i = 0; i < nfds; i++) {
			fp = *rp;
			/*
			 * zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = NULL;
			unp_discard(fp);
		}
		goto out;
	}

	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	rp = ((struct file **)CMSG_DATA(cm));
	for (i = 0; i < nfds; i++) {
		bcopy(rp, &fp, sizeof(fp));
		rp++;
		if ((error = fdalloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(p->p_fd, fdp[i]);

			if (error == ENOSPC) {
				fdexpand(p);
				error = 0;
			} else {
				/*
				 * This is the error that has historically
				 * been returned, and some callers may
				 * expect it.
				 */
				error = EMSGSIZE;
			}
			fdpunlock(p->p_fd);
			goto restart;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.. We finalize it all
		 * in the loop below.
		 */
		p->p_fd->fd_ofiles[fdp[i]] = fp;
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct file **)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		fp->f_msgcount--;
		unp_rights--;
	}

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
	 */
	memcpy(CMSG_DATA(cm), fdp, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_LEN(nfds * sizeof(int));
out:
	fdpunlock(p->p_fd);
	free(fdp, M_TEMP);
	return (error);
}

int
unp_internalize(struct mbuf *control, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp, *fp;
	int i, error;
	int nfds, *ip, fd, neededspace;

	/*
	 * Check for two potential msg_controllen values because
	 * IETF stuck their nose in a place it does not belong.
	 */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    !(cm->cmsg_len == control->m_len ||
	    control->m_len == CMSG_ALIGN(cm->cmsg_len)))
		return (EINVAL);
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof (int);

	/* Make sure we have room for the struct file pointers */
morespace:
	neededspace = CMSG_SPACE(nfds * sizeof(struct file *)) -
	    control->m_len;
	if (neededspace > M_TRAILINGSPACE(control)) {
		/* if we already have a cluster, the message is just too big */
		if (control->m_flags & M_EXT)
			return (E2BIG);

		/* allocate a cluster and try again */
		MCLGET(control, M_WAIT);
		if ((control->m_flags & M_EXT) == 0)
			return (ENOBUFS);	/* allocation failed */

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
		goto morespace;
	}

	/* adjust message & mbuf to note amount of space actually used. */
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct file *));
	control->m_len = CMSG_SPACE(nfds * sizeof(struct file *));

	ip = ((int *)CMSG_DATA(cm)) + nfds - 1;
	rp = ((struct file **)CMSG_DATA(cm)) + nfds - 1;
	for (i = 0; i < nfds; i++) {
		bcopy(ip, &fd, sizeof fd);
		ip--;
		if ((fp = fd_getfile(fdp, fd)) == NULL) {
			error = EBADF;
			goto fail;
		}
		if (fp->f_count == LONG_MAX-2 ||
		    fp->f_msgcount == LONG_MAX-2) {
			error = EDEADLK;
			goto fail;
		}
		bcopy(&fp, rp, sizeof fp);
		rp--;
		fp->f_count++;
		fp->f_msgcount++;
		unp_rights++;
	}
	return (0);
fail:
	/* Back out what we just did. */
	for ( ; i > 0; i--) {
		rp++;
		bcopy(rp, &fp, sizeof(fp));
		fp->f_count--;
		fp->f_msgcount--;
		unp_rights--;
	}

	return (error);
}

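/*
 * Deferred mark-and-sweep collection of files in flight.  The mark pass
 * walks the file table and marks every file still reachable from some
 * descriptor table, using FDEFER to come back and scan the receive
 * buffers of any Unix-domain socket it marks.  The sweep pass then takes
 * an extra reference to each file reachable only from in-flight messages,
 * flushes the rights queued on those sockets, and drops the extra
 * references with closef().
 */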
int	unp_defer, unp_gcing;
extern	struct domain unixdomain;

void
unp_gc(void)
{
	struct file *fp, *nextfp;
	struct socket *so;
	struct file **extra_ref, **fpp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;
	LIST_FOREACH(fp, &filehead, f_list)
		fp->f_flag &= ~(FMARK|FDEFER);
	do {
		LIST_FOREACH(fp, &filehead, f_list) {
			if (fp->f_flag & FDEFER) {
				fp->f_flag &= ~FDEFER;
				unp_defer--;
			} else {
				if (fp->f_count == 0)
					continue;
				if (fp->f_flag & FMARK)
					continue;
				if (fp->f_count == fp->f_msgcount)
					continue;
			}
			fp->f_flag |= FMARK;

			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == NULL)
				continue;
			if (so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			unp_scan(so->so_rcv.sb_mb, unp_mark, 0);
		}
	} while (unp_defer);
	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code un_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.   Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref;
	    fp != NULL; fp = nextfp) {
		nextfp = LIST_NEXT(fp, f_list);
		if (fp->f_count == 0)
			continue;
		if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			FREF(fp);
			fp->f_count++;
		}
	}
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		if ((*fpp)->f_type == DTYPE_SOCKET && (*fpp)->f_data != NULL)
			sorflush((struct socket *)(*fpp)->f_data);
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		(void) closef(*fpp, NULL);
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}

void
unp_dispose(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_discard, 1);
}

void
unp_scan(struct mbuf *m0, void (*op)(struct file *), int discard)
{
	struct mbuf *m;
	struct file **rp, *fp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof *cm))
				    / sizeof(struct file *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++) {
					fp = *rp;
					if (discard)
						*rp = 0;
					(*op)(fp);
					rp++;
				}
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

void
unp_mark(struct file *fp)
{
	if (fp == NULL)
		return;

	if (fp->f_flag & FMARK)
		return;

	if (fp->f_flag & FDEFER)
		return;

	if (fp->f_type == DTYPE_SOCKET) {
		unp_defer++;
		fp->f_flag |= FDEFER;
	} else {
		fp->f_flag |= FMARK;
	}
}

void
unp_discard(struct file *fp)
{

	if (fp == NULL)
		return;
	FREF(fp);
	fp->f_msgcount--;
	unp_rights--;
	(void) closef(fp, NULL);
}