/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/mbuf.h>
#include <sys/nlookup.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>
#include <sys/kern_syscall.h>
#include <sys/taskqueue.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>
#include <sys/socketvar2.h>
#include <sys/msgport2.h>

#define UNP_DETACHED		UNP_PRIVATE1
#define UNP_CONNECTING		UNP_PRIVATE2
#define UNP_DROPPED		UNP_PRIVATE3
#define UNP_MARKER		UNP_PRIVATE4

#define UNP_ISATTACHED(unp)	\
    ((unp) != NULL && ((unp)->unp_flags & UNP_DETACHED) == 0)

#ifdef INVARIANTS
#define UNP_ASSERT_TOKEN_HELD(unp) \
    ASSERT_LWKT_TOKEN_HELD(lwkt_token_pool_lookup((unp)))
#else	/* !INVARIANTS */
#define UNP_ASSERT_TOKEN_HELD(unp)
#endif	/* INVARIANTS */

struct unp_defdiscard {
	SLIST_ENTRY(unp_defdiscard) next;
	struct file *fp;
};
SLIST_HEAD(unp_defdiscard_list, unp_defdiscard);

TAILQ_HEAD(unpcb_qhead, unpcb);
struct unp_global_head {
	struct unpcb_qhead	list;
	int			count;
};

static MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
static unp_gen_t unp_gencnt;

static struct unp_global_head unp_stream_head;
static struct unp_global_head unp_dgram_head;
static struct unp_global_head unp_seqpkt_head;

static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
static struct taskqueue *unp_taskqueue;

static struct unp_defdiscard_list unp_defdiscard_head;
static struct spinlock unp_defdiscard_spin;
static struct task unp_defdiscard_task;

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t unp_ino = 1;		/* prototype for fake inode numbers */

static int	unp_attach (struct socket *, struct pru_attach_info *);
static void	unp_detach (struct unpcb *);
static int	unp_bind (struct unpcb *, struct sockaddr *, struct thread *);
static int	unp_connect (struct socket *, struct sockaddr *,
				struct thread *);
static void	unp_disconnect(struct unpcb *, int);
static void	unp_shutdown (struct unpcb *);
static void	unp_gc (void);
static int	unp_gc_clearmarks(struct file *, void *);
static int	unp_gc_checkmarks(struct file *, void *);
static int	unp_gc_checkrefs(struct file *, void *);
static void	unp_scan (struct mbuf *, void (*)(struct file *, void *),
				void *data);
static void	unp_mark (struct file *, void *data);
static void	unp_discard (struct file *, void *);
static int	unp_internalize (struct mbuf *, struct thread *);
static int	unp_listen (struct unpcb *, struct thread *);
static void	unp_fp_externalize(struct lwp *lp, struct file *fp, int fd);
static int	unp_find_lockref(struct sockaddr *nam, struct thread *td,
		    short type, struct unpcb **unp_ret);
static int	unp_connect_pair(struct unpcb *unp, struct unpcb *unp2);
static void	unp_drop(struct unpcb *unp, int error);
static void	unp_defdiscard_taskfunc(void *, int);

/*
 * SMP Considerations:
 *
 *	Since unp_token will be automatically released upon execution of
 *	blocking code, we need to reference unp_conn before any possible
 *	blocking code to prevent it from being ripped out from behind our
 *	back.
 *
 *	Any adjustment to unp->unp_conn requires both the global unp_token
 *	AND the per-unp token (lwkt_token_pool_lookup(unp)) to be held.
 *
 *	Any access to so_pcb to obtain unp requires the pool token for
 *	unp to be held.
 */

static __inline void
unp_reference(struct unpcb *unp)
{
	/* 0->1 transition will not work */
	KKASSERT(unp->unp_refcnt > 0);
	atomic_add_int(&unp->unp_refcnt, 1);
}

static __inline void
unp_free(struct unpcb *unp)
{
	KKASSERT(unp->unp_refcnt > 0);
	if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
		unp_detach(unp);
}

static __inline struct unpcb *
unp_getsocktoken(struct socket *so)
{
	struct unpcb *unp;

	/*
	 * The unp pointer is invalid until we verify that it is
	 * good by re-checking so_pcb AFTER obtaining the token.
	 */
	while ((unp = so->so_pcb) != NULL) {
		lwkt_getpooltoken(unp);
		if (unp == so->so_pcb)
			break;
		lwkt_relpooltoken(unp);
	}
	return unp;
}

static __inline void
unp_reltoken(struct unpcb *unp)
{
	if (unp != NULL)
		lwkt_relpooltoken(unp);
}

static __inline void
unp_setflags(struct unpcb *unp, int flags)
{
	atomic_set_int(&unp->unp_flags, flags);
}

static __inline void
unp_clrflags(struct unpcb *unp, int flags)
{
	atomic_clear_int(&unp->unp_flags, flags);
}

static __inline struct unp_global_head *
unp_globalhead(short type)
{
	switch (type) {
	case SOCK_STREAM:
		return &unp_stream_head;
	case SOCK_DGRAM:
		return &unp_dgram_head;
	case SOCK_SEQPACKET:
		return &unp_seqpkt_head;
	default:
		panic("unknown socket type %d", type);
	}
}

/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 *	 will sofree() it when we return.
 */
static void
uipc_abort(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, ECONNABORTED);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_accept(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else {
		struct unpcb *unp2 = unp->unp_conn;

		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp2 && unp2->unp_addr) {
			unp_reference(unp2);
			*msg->accept.nm_nam = dup_sockaddr(
				(struct sockaddr *)unp2->unp_addr);
			unp_free(unp2);
		} else {
			*msg->accept.nm_nam = dup_sockaddr(&sun_noname);
		}
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_attach(netmsg_t msg)
{
	int error;

	lwkt_gettoken(&unp_token);

	KASSERT(msg->base.nm_so->so_pcb == NULL, ("double unp attach"));
	error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);

	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_bind(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp))
		error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
	else
		error = EINVAL;

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect(netmsg_t msg)
{
	int error;

	error = unp_connect(msg->base.nm_so, msg->connect.nm_nam,
			    msg->connect.nm_td);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect2(netmsg_t msg)
{
	int error;

	error = unp_connect2(msg->connect2.nm_so1, msg->connect2.nm_so2);
	lwkt_replymsg(&msg->lmsg, error);
}

/* control is EOPNOTSUPP */

static void
uipc_detach(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, 0);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_disconnect(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_disconnect(unp, 0);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_listen(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp) || unp->unp_vnode == NULL)
		error = EINVAL;
	else
		error = unp_listen(unp, msg->listen.nm_td);

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_peeraddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else if (unp->unp_conn && unp->unp_conn->unp_addr) {
		struct unpcb *unp2 = unp->unp_conn;

		unp_reference(unp2);
		*msg->peeraddr.nm_nam = dup_sockaddr(
			(struct sockaddr *)unp2->unp_addr);
		unp_free(unp2);
		error = 0;
	} else {
		/*
		 * XXX: It seems that this test always fails even when
		 * the connection is established.  So, this else clause is
		 * added as a workaround to return a PF_LOCAL sockaddr.
		 */
		*msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_rcvd(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (unp->unp_conn == NULL)
			break;
		unp2 = unp->unp_conn;	/* protected by pool token */

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 *
		 * As in several places in this module we have to ref unp2
		 * to ensure that it does not get ripped out from under us
		 * if we block on the so2 token or in sowwakeup().
		 */
		so2 = unp2->unp_socket;
		unp_reference(unp2);
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
		    so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax
		) {
			atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);

			sowwakeup(so2);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		unp_free(unp2);
		break;
	default:
		panic("uipc_rcvd unknown socktype");
		/*NOTREACHED*/
	}
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

/* pru_rcvoob is EOPNOTSUPP */

static void
uipc_send(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	struct mbuf *control;
	struct mbuf *m;
	int error = 0;

	so = msg->base.nm_so;
	control = msg->send.nm_control;
	m = msg->send.nm_m;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto release;
	}

	if (msg->send.nm_flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	wakeup_start_delayed();

	if (control && (error = unp_internalize(control, msg->send.nm_td)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (msg->send.nm_addr) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_find_lockref(msg->send.nm_addr,
			    msg->send.nm_td, so->so_type, &unp2);
			if (error)
				break;
			/*
			 * NOTE:
			 * unp2 is locked and referenced.
			 *
			 * We could unlock unp2 now, since it was checked
			 * and referenced.
			 */
			unp_reltoken(unp2);
		} else {
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			unp2 = unp->unp_conn;
			unp_reference(unp2);
		}
		/* NOTE: unp2 is referenced. */
		so2 = unp2->unp_socket;

		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;

		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = NULL;
			control = NULL;
		} else {
			error = ENOBUFS;
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);

		unp_free(unp2);
		break;
	}

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if (unp->unp_conn == NULL) {
			if (msg->send.nm_addr) {
				error = unp_connect(so,
						    msg->send.nm_addr,
						    msg->send.nm_td);
				if (error)
					break;	/* XXX */
			}
			/*
			 * NOTE:
			 * unp_conn still could be NULL, even if the
			 * above unp_connect() succeeds; since the
			 * current unp's token could be released due
			 * to blocking operations after unp_conn is
			 * assigned.
			 */
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
		}
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}

		unp2 = unp->unp_conn;
		KASSERT(unp2 != NULL, ("unp is not connected"));
		so2 = unp2->unp_socket;

		unp_reference(unp2);

		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (control) {
			if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
				control = NULL;
				m = NULL;
			}
		} else if (so->so_type == SOCK_SEQPACKET) {
			sbappendrecord(&so2->so_rcv.sb, m);
			m = NULL;
		} else {
			sbappend(&so2->so_rcv.sb, m);
			m = NULL;
		}

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 */
		if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
		    so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax
		) {
			atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		sorwakeup(so2);

		unp_free(unp2);
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
	 */
	if (msg->send.nm_flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);
release:
	unp_reltoken(unp);
	wakeup_end_delayed();

	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * MPSAFE
 */
static void
uipc_sense(netmsg_t msg)
{
	struct unpcb *unp;
	struct socket *so;
	struct stat *sb;
	int error;

	so = msg->base.nm_so;
	sb = msg->sense.nm_stat;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	sb->st_blksize = so->so_snd.ssb_hiwat;
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0) {	/* make up a non-zero inode number */
		unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
		if (__predict_false(unp->unp_ino == 0))
			unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
	}
	sb->st_ino = unp->unp_ino;
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_shutdown(netmsg_t msg)
{
	struct socket *so;
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (UNP_ISATTACHED(unp)) {
		socantsendmore(so);
		unp_shutdown(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_sockaddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		if (unp->unp_addr) {
			*msg->sockaddr.nm_nam =
				dup_sockaddr((struct sockaddr *)unp->unp_addr);
		}
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

struct pr_usrreqs uipc_usrreqs = {
	.pru_abort = uipc_abort,
	.pru_accept = uipc_accept,
	.pru_attach = uipc_attach,
	.pru_bind = uipc_bind,
	.pru_connect = uipc_connect,
	.pru_connect2 = uipc_connect2,
	.pru_control = pr_generic_notsupp,
	.pru_detach = uipc_detach,
	.pru_disconnect = uipc_disconnect,
	.pru_listen = uipc_listen,
	.pru_peeraddr = uipc_peeraddr,
	.pru_rcvd = uipc_rcvd,
	.pru_rcvoob = pr_generic_notsupp,
	.pru_send = uipc_send,
	.pru_sense = uipc_sense,
	.pru_shutdown = uipc_shutdown,
	.pru_sockaddr = uipc_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive
};

void
uipc_ctloutput(netmsg_t msg)
{
	struct socket *so;
	struct sockopt *sopt;
	struct unpcb *unp;
	int error = 0;

	so = msg->base.nm_so;
	sopt = msg->ctloutput.nm_sopt;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case LOCAL_PEERCRED:
			if (unp->unp_flags & UNP_HAVEPC)
				soopt_from_kbuf(sopt, &unp->unp_peercred,
						sizeof(unp->unp_peercred));
			else {
				if (so->so_type == SOCK_STREAM)
					error = ENOTCONN;
				else if (so->so_type == SOCK_SEQPACKET)
					error = ENOTCONN;
				else
					error = EINVAL;
			}
			break;
		default:
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_SET:
	default:
		error = EOPNOTSUPP;
		break;
	}

done:
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 *
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 *
 * We want the local send/recv space to be significantly larger than lo0's
 * mtu of 16384.
 */
#ifndef PIPSIZ
#define	PIPSIZ	57344
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int	unp_rights;	/* file descriptors in flight */
static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin, "unp_spin");

SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Size of stream socket send buffer");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Size of stream socket receive buffer");

SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_sendspace, 0, "Max datagram socket size");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Size of datagram socket receive buffer");

SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight");

static int
unp_attach(struct socket *so, struct pru_attach_info *ai)
{
	struct unp_global_head *head;
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		switch (so->so_type) {
		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			error = soreserve(so, unpst_sendspace, unpst_recvspace,
					  ai->sb_rlimit);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace,
					  ai->sb_rlimit);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			goto failed;
	}

	/*
	 * In order to support sendfile we have to set either SSB_STOPSUPP
	 * or SSB_PREALLOC.  Unix domain sockets use the SSB_STOP flow
	 * control mechanism.
	 */
	if (so->so_type == SOCK_STREAM) {
		atomic_set_int(&so->so_rcv.ssb_flags, SSB_STOPSUPP);
		atomic_set_int(&so->so_snd.ssb_flags, SSB_STOPSUPP);
	}

	unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
	if (unp == NULL) {
		error = ENOBUFS;
		goto failed;
	}
	unp->unp_refcnt = 1;
	unp->unp_gencnt = ++unp_gencnt;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	unp->unp_rvnode = ai->fd_rdir;		/* jail cruft XXX JH */
	so->so_pcb = (caddr_t)unp;
	soreference(so);

	head = unp_globalhead(so->so_type);
	TAILQ_INSERT_TAIL(&head->list, unp, unp_link);
	head->count++;
	error = 0;
failed:
	lwkt_reltoken(&unp_token);
	return error;
}

static void
unp_detach(struct unpcb *unp)
{
	struct unp_global_head *head;
	struct socket *so;

	lwkt_gettoken(&unp_token);
	lwkt_getpooltoken(unp);

	so = unp->unp_socket;

	head = unp_globalhead(so->so_type);
	KASSERT(head->count > 0, ("invalid unp count"));
	TAILQ_REMOVE(&head->list, unp, unp_link);
	head->count--;

	unp->unp_gencnt = ++unp_gencnt;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vrele(unp->unp_vnode);
		unp->unp_vnode = NULL;
	}
	soisdisconnected(so);
	soreference(so);		/* for delayed sorflush */
	KKASSERT(so->so_pcb == unp);
	so->so_pcb = NULL;		/* both tokens required */
	unp->unp_socket = NULL;
	sofree(so);			/* remove pcb ref */

	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(so);
		unp_gc();
	}
	sofree(so);
	lwkt_relpooltoken(unp);
	lwkt_reltoken(&unp_token);

	KASSERT(unp->unp_conn == NULL, ("unp is still connected"));
	KASSERT(LIST_EMPTY(&unp->unp_refs), ("unp still has references"));

	if (unp->unp_addr)
		kfree(unp->unp_addr, M_SONAME);
	kfree(unp, M_UNPCB);
}

static int
unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (unp->unp_vnode != NULL)
		return EINVAL;

	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	error = nlookup_init(&nd, buf, UIO_SYSSPACE,
			     NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
		error = EADDRINUSE;
	if (error)
		goto done;

	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
	error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
	if (error == 0) {
		if (unp->unp_vnode == NULL) {
			vp->v_socket = unp->unp_socket;
			unp->unp_vnode = vp;
			unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam);
			vn_unlock(vp);
		} else {
			vput(vp);	/* late race */
			error = EINVAL;
		}
	}
done:
	nlookup_done(&nd);
	return (error);
}

static int
unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp, *unp2;
	int error, flags = 0;

	lwkt_gettoken(&unp_token);

	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto failed;
	}

	if ((unp->unp_flags & UNP_CONNECTING) || unp->unp_conn != NULL) {
		error = EISCONN;
		goto failed;
	}

	flags = UNP_CONNECTING;
	unp_setflags(unp, flags);

	error = unp_find_lockref(nam, td, so->so_type, &unp2);
	if (error)
		goto failed;
	/*
	 * NOTE:
	 * unp2 is locked and referenced.
	 */

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		struct socket *so2, *so3;
		struct unpcb *unp3;

		so2 = unp2->unp_socket;
		if (!(so2->so_options & SO_ACCEPTCONN) ||
		    (so3 = sonewconn_faddr(so2, 0, NULL,
					   TRUE /* keep ref */)) == NULL) {
			error = ECONNREFUSED;
			goto done;
		}
		/* so3 has a socket reference. */

		unp3 = unp_getsocktoken(so3);
		if (!UNP_ISATTACHED(unp3)) {
			unp_reltoken(unp3);
			/*
			 * Already aborted; we only need to drop the
			 * socket reference held by sonewconn_faddr().
			 */
			sofree(so3);
			error = ECONNREFUSED;
			goto done;
		}
		unp_reference(unp3);
		/*
		 * NOTE:
		 * unp3 is locked and referenced.
		 */

		/*
		 * Release so3 socket reference held by sonewconn_faddr().
		 * Since we have referenced unp3, neither unp3 nor so3 will
		 * be destroyed here.
		 */
		sofree(so3);

		if (unp2->unp_addr != NULL) {
			unp3->unp_addr = (struct sockaddr_un *)
			    dup_sockaddr((struct sockaddr *)unp2->unp_addr);
		}

		/*
		 * unp_peercred management:
		 *
		 * The connecter's (client's) credentials are copied
		 * from its process structure at the time of connect()
		 * (which is now).
		 */
		cru2x(td->td_proc->p_ucred, &unp3->unp_peercred);
		unp_setflags(unp3, UNP_HAVEPC);
		/*
		 * The receiver's (server's) credentials are copied
		 * from the unp_peercred member of socket on which the
		 * former called listen(); unp_listen() cached that
		 * process's credentials at that time so we can use
		 * them now.
		 */
		KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
		    ("unp_connect: listener without cached peercred"));
		memcpy(&unp->unp_peercred, &unp2->unp_peercred,
		    sizeof(unp->unp_peercred));
		unp_setflags(unp, UNP_HAVEPC);

		error = unp_connect_pair(unp, unp3);
		if (error) {
			/* XXX we need a better name */
			soabort_oncpu(so3);
		}

		/* Done with unp3 */
		unp_free(unp3);
		unp_reltoken(unp3);
	} else {
		error = unp_connect_pair(unp, unp2);
	}
done:
	unp_free(unp2);
	unp_reltoken(unp2);
failed:
	if (flags)
		unp_clrflags(unp, flags);
	unp_reltoken(unp);

	lwkt_reltoken(&unp_token);
	return (error);
}

/*
 * Connect two unix domain sockets together.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp, *unp2;
	int error;

	lwkt_gettoken(&unp_token);
	if (so2->so_type != so->so_type) {
		lwkt_reltoken(&unp_token);
		return (EPROTOTYPE);
	}
	unp = unp_getsocktoken(so);
	unp2 = unp_getsocktoken(so2);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}
	if (!UNP_ISATTACHED(unp2)) {
		error = ECONNREFUSED;
		goto done;
	}

	if (unp->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}
	if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) &&
	    unp2->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}

	error = unp_connect_pair(unp, unp2);
done:
	unp_reltoken(unp2);
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);
	return (error);
}

/*
 * Disconnect a unix domain socket pair.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
static void
unp_disconnect(struct unpcb *unp, int error)
{
	struct socket *so = unp->unp_socket;
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (error)
		so->so_error = error;

	while ((unp2 = unp->unp_conn) != NULL) {
		lwkt_getpooltoken(unp2);
		if (unp2 == unp->unp_conn)
			break;
		lwkt_relpooltoken(unp2);
	}
	if (unp2 == NULL)
		return;
	/* unp2 is locked. */

	KASSERT((unp2->unp_flags & UNP_DROPPED) == 0, ("unp2 was dropped"));

	unp->unp_conn = NULL;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		soclrstate(so, SS_ISCONNECTED);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/*
		 * Keep a reference before clearing the unp_conn
		 * to avoid racing uipc_detach()/uipc_abort() in
		 * another thread.
		 */
		unp_reference(unp2);
		KASSERT(unp2->unp_conn == unp, ("unp_conn mismatch"));
		unp2->unp_conn = NULL;

		soisdisconnected(so);
		soisdisconnected(unp2->unp_socket);

		unp_free(unp2);
		break;
	}

	lwkt_relpooltoken(unp2);
}

#ifdef notdef
void
unp_abort(struct unpcb *unp)
{
	lwkt_gettoken(&unp_token);
	unp_free(unp);
	lwkt_reltoken(&unp_token);
}
#endif

static int
prison_unpcb(struct thread *td, struct unpcb *unp)
{
	struct proc *p;

	if (td == NULL)
		return (0);
	if ((p = td->td_proc) == NULL)
		return (0);
	if (!p->p_ucred->cr_prison)
		return (0);
	if (p->p_fd->fd_rdir == unp->unp_rvnode)
		return (0);
	return (1);
}

static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	struct unp_global_head *head = arg1;
	int error, i, n;
	struct unpcb *unp, *marker;

	KKASSERT(curproc != NULL);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = head->count;
		req->oldidx = (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	marker = kmalloc(sizeof(*marker), M_UNPCB, M_WAITOK | M_ZERO);
	marker->unp_flags |= UNP_MARKER;

	lwkt_gettoken(&unp_token);

	n = head->count;
	i = 0;
	error = 0;

	TAILQ_INSERT_HEAD(&head->list, marker, unp_link);
	while ((unp = TAILQ_NEXT(marker, unp_link)) != NULL && i < n) {
		struct xunpcb xu;

		TAILQ_REMOVE(&head->list, marker, unp_link);
		TAILQ_INSERT_AFTER(&head->list, unp, marker, unp_link);

		if (unp->unp_flags & UNP_MARKER)
			continue;
		if (prison_unpcb(req->td, unp))
			continue;

		xu.xu_len = sizeof(xu);
		xu.xu_unpp = unp;

		/*
		 * NOTE:
		 * unp->unp_addr and unp->unp_conn are protected by
		 * unp_token.  So if we want to get rid of unp_token
		 * or reduce the coverage of unp_token, care must be
		 * taken.
		 */
		if (unp->unp_addr) {
			bcopy(unp->unp_addr, &xu.xu_addr,
			    unp->unp_addr->sun_len);
		}
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			bcopy(unp->unp_conn->unp_addr,
			    &xu.xu_caddr,
			    unp->unp_conn->unp_addr->sun_len);
		}
		bcopy(unp, &xu.xu_unp, sizeof(*unp));
		sotoxsocket(unp->unp_socket, &xu.xu_socket);

		/* NOTE: This could block and temporarily release unp_token */
		error = SYSCTL_OUT(req, &xu, sizeof(xu));
		if (error)
			break;
		++i;
	}
	TAILQ_REMOVE(&head->list, marker, unp_link);

	lwkt_reltoken(&unp_token);

	kfree(marker, M_UNPCB);
	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_dgram_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_stream_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local stream sockets");
SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_seqpkt_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local seqpacket sockets");

static void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	if ((unp->unp_socket->so_type == SOCK_STREAM ||
	     unp->unp_socket->so_type == SOCK_SEQPACKET) &&
	    unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
		socantrcvmore(so);
	}
}

#ifdef notdef
void
unp_drain(void)
{
	lwkt_gettoken(&unp_token);
	lwkt_reltoken(&unp_token);
}
#endif

int
unp_externalize(struct mbuf *rights)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* XXX */
	struct lwp *lp = td->td_lwp;
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int *fdp;
	int i;
	struct file **rp;
	struct file *fp;
	int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
		/ sizeof(struct file *);
	int f;

	/*
	 * if the new FD's will not fit, then we free them all
	 */
	if (!fdavail(p, newfds)) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			/*
			 * zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = NULL;
			unp_discard(fp, NULL);
		}
		return (EMSGSIZE);
	}

	/*
	 * now change each pointer to an fd in the global table to
	 * an integer that is the index to the local fd table entry
	 * that we set up to point to the global one we are transferring.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in forward order.  In that case, an integer will
	 * always come in the same place or before its corresponding
	 * struct file pointer.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in reverse order.
	 *
	 * Hold revoke_token in 'shared' mode, so that we won't miss
	 * the FREVOKED update on fps being externalized (fsetfd).
	 */
	lwkt_gettoken_shared(&revoke_token);
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f)) {
				int j;

				/*
				 * Previous fdavail() can't guarantee
				 * fdalloc() success due to SMP race.
				 * Just clean up and return the same
				 * error value as if fdavail() failed.
				 */

				/* Close externalized files */
				for (j = 0; j < i; j++)
					kern_close(fdp[j]);
				/* Discard the rest of internal files */
				for (; i < newfds; i++)
					unp_discard(rp[i], NULL);
				/* Wipe out the control message */
				for (i = 0; i < newfds; i++)
					rp[i] = NULL;

				lwkt_reltoken(&revoke_token);
				return (EMSGSIZE);
			}
			fp = rp[i];
			unp_fp_externalize(lp, fp, f);
			fdp[i] = f;
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm) + newfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp--;
			unp_fp_externalize(lp, fp, f);
			*fdp-- = f;
		}
	}
	lwkt_reltoken(&revoke_token);

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
	rights->m_len = cm->cmsg_len;

	return (0);
}

static void
unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
{
	if (lp) {
		KKASSERT(fd >= 0);
		if (fp->f_flag & FREVOKED) {
			struct file *fx;
			int error;

			kprintf("Warning: revoked fp exiting unix socket\n");
			error = falloc(lp, &fx, NULL);
			if (error == 0) {
				fsetfd(lp->lwp_proc->p_fd, fx, fd);
				fdrop(fx);
			} else {
				fsetfd(lp->lwp_proc->p_fd, NULL, fd);
			}
		} else {
			fsetfd(lp->lwp_proc->p_fd, fp, fd);
		}
	}
	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);
	fdrop(fp);
}

void
unp_init(void)
{
	TAILQ_INIT(&unp_stream_head.list);
	TAILQ_INIT(&unp_dgram_head.list);
	TAILQ_INIT(&unp_seqpkt_head.list);

	spin_init(&unp_spin, "unpinit");

	SLIST_INIT(&unp_defdiscard_head);
	spin_init(&unp_defdiscard_spin, "unpdisc");
	TASK_INIT(&unp_defdiscard_task, 0, unp_defdiscard_taskfunc, NULL);

	/*
	 * Create taskqueue for deferred discard, and stick it to
	 * the last CPU.
	 */
	unp_taskqueue = taskqueue_create("unp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &unp_taskqueue);
	taskqueue_start_threads(&unp_taskqueue, 1, TDPRI_KERN_DAEMON,
	    ncpus - 1, "unp taskq");
}

static int
unp_internalize(struct mbuf *control, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdescp;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp;
	struct file *fp;
	int i, fd, *fdp;
	struct cmsgcred *cmcred;
	int oldfds;
	u_int newlen;
	int error;

	KKASSERT(p);

	if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
	    cm->cmsg_level != SOL_SOCKET ||
	    CMSG_ALIGN(cm->cmsg_len) != control->m_len)
		return EINVAL;

	/*
	 * Fill in credential information.
	 */
	if (cm->cmsg_type == SCM_CREDS) {
		cmcred = (struct cmsgcred *)CMSG_DATA(cm);
		cmcred->cmcred_pid = p->p_pid;
		cmcred->cmcred_uid = p->p_ucred->cr_ruid;
		cmcred->cmcred_gid = p->p_ucred->cr_rgid;
		cmcred->cmcred_euid = p->p_ucred->cr_uid;
		cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
					     CMGROUP_MAX);
		for (i = 0; i < cmcred->cmcred_ngroups; i++)
			cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
		return 0;
	}

	/*
	 * cmsghdr may not be aligned, do not allow calculation(s) to
	 * go negative.
	 */
	if (cm->cmsg_len < CMSG_LEN(0))
		return EINVAL;

	oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);

	/*
	 * Now replace the integer FDs with pointers to
	 * the associated global file table entry..
	 * Allocate a bigger buffer as necessary.  But if a cluster is not
	 * enough, return E2BIG.
	 */
	newlen = CMSG_LEN(oldfds * sizeof(struct file *));
	if (newlen > MCLBYTES)
		return E2BIG;
	if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
		if (control->m_flags & M_EXT)
			return E2BIG;
		MCLGET(control, M_WAITOK);
		if (!(control->m_flags & M_EXT))
			return ENOBUFS;

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
	}

	fdescp = p->p_fd;
	spin_lock_shared(&fdescp->fd_spin);

	/*
	 * check that all the FDs passed in refer to legal OPEN files
	 * If not, reject the entire operation.
	 */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < oldfds; i++) {
		fd = *fdp++;
		if ((unsigned)fd >= fdescp->fd_nfiles ||
		    fdescp->fd_files[fd].fp == NULL) {
			error = EBADF;
			goto done;
		}
		if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
			error = EOPNOTSUPP;
			goto done;
		}
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = newlen;
	control->m_len = CMSG_ALIGN(newlen);

	/*
	 * Transform the file descriptors into struct file pointers.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in reverse order so that the int won't get overwritten
	 * until we're done.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in forward order.
	 */
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp--].fp;
			*rp-- = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp++].fp;
			*rp++ = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	}
	error = 0;
done:
	spin_unlock_shared(&fdescp->fd_spin);
	return error;
}

/*
 * Garbage collect in-transit file descriptors that get lost due to
 * loops (i.e. when a socket is sent to another process over itself,
 * and more complex situations).
 *
 * NOT MPSAFE - TODO socket flush code and maybe closef.  Rest is MPSAFE.
 */

struct unp_gc_info {
	struct file **extra_ref;
	struct file *locked_fp;
	int defer;
	int index;
	int maxindex;
};

static void
unp_gc(void)
{
	struct unp_gc_info info;
	static boolean_t unp_gcing;
	struct file **fpp;
	int i;

	/*
	 * Only one gc can be in-progress at any given moment
	 */
	spin_lock(&unp_spin);
	if (unp_gcing) {
		spin_unlock(&unp_spin);
		return;
	}
	unp_gcing = TRUE;
	spin_unlock(&unp_spin);

	lwkt_gettoken(&unp_token);

	/*
	 * Before going through all this, set all FDs to be NOT deferred
	 * and NOT externally accessible (not marked).  During the scan
	 * a fd can be marked externally accessible but we may or may not
	 * be able to immediately process it (controlled by FDEFER).
	 *
	 * If we loop, sleep a bit.  The complexity of the topology can cause
	 * multiple loops.  Also failure to acquire the socket's so_rcv
	 * token can cause us to loop.
	 */
	allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
	do {
		info.defer = 0;
		allfiles_scan_exclusive(unp_gc_checkmarks, &info);
		if (info.defer)
			tsleep(&info, 0, "gcagain", 1);
	} while (info.defer);

	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.  Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE, M_WAITOK);
	info.maxindex = 256;

	do {
		/*
		 * Look for matches
		 */
		info.index = 0;
		allfiles_scan_exclusive(unp_gc_checkrefs, &info);

		/*
		 * For each FD on our hit list, do the following two things
		 */
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
			struct file *tfp = *fpp;
			if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
				sorflush((struct socket *)(tfp->f_data));
		}
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
			closef(*fpp, NULL);
	} while (info.index == info.maxindex);

	lwkt_reltoken(&unp_token);

	kfree((caddr_t)info.extra_ref, M_FILE);
	unp_gcing = FALSE;
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkrefs(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if (fp->f_count == 0)
		return(0);
	if (info->index == info->maxindex)
		return(-1);

	/*
	 * If all refs are from msgs, and it's not marked accessible
	 * then it must be referenced from some unreachable cycle
	 * of (shut-down) FDs, so include it in our
	 * list of FDs to remove
	 */
	if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
		info->extra_ref[info->index++] = fp;
		fhold(fp);
	}
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_clearmarks(struct file *fp, void *data __unused)
{
	atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkmarks(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;
	struct socket *so;

	/*
	 * If the file is not open, skip it.  Make sure it isn't marked
	 * deferred or we could loop forever, in case we somehow race
	 * something.
	 */
	if (fp->f_count == 0) {
		if (fp->f_flag & FDEFER)
			atomic_clear_int(&fp->f_flag, FDEFER);
		return(0);
	}
	/*
	 * If we already marked it as 'defer' in a
	 * previous pass, then try to process it this time
	 * and un-mark it
	 */
	if (fp->f_flag & FDEFER) {
		atomic_clear_int(&fp->f_flag, FDEFER);
	} else {
		/*
		 * if it's not deferred, then check if it's
		 * already marked.. if so skip it
		 */
		if (fp->f_flag & FMARK)
			return(0);
		/*
		 * If all references are from messages
		 * in transit, then skip it.  it's not
		 * externally accessible.
		 */
		if (fp->f_count == fp->f_msgcount)
			return(0);
		/*
		 * If it got this far then it must be
		 * externally accessible.
		 */
		atomic_set_int(&fp->f_flag, FMARK);
	}

	/*
	 * either it was deferred, or it is externally
	 * accessible and not already marked so.
	 * Now check if it is possibly one of OUR sockets.
	 */
	if (fp->f_type != DTYPE_SOCKET ||
	    (so = (struct socket *)fp->f_data) == NULL) {
		return(0);
	}
	if (so->so_proto->pr_domain != &localdomain ||
	    !(so->so_proto->pr_flags & PR_RIGHTS)) {
		return(0);
	}

	/*
	 * So, Ok, it's one of our sockets and it IS externally accessible
	 * (or was deferred).  Now we look to see if we hold any file
	 * descriptors in its message buffers.  Follow those links and mark
	 * them as accessible too.
	 *
	 * We are holding multiple spinlocks here; if we cannot get the
	 * token non-blocking, defer until the next loop.
	 */
	info->locked_fp = fp;
	if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
		unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	} else {
		atomic_set_int(&fp->f_flag, FDEFER);
		++info->defer;
	}
	return (0);
}

/*
 * Dispose of the fp's stored in a mbuf.
 *
 * The dds loop can cause additional fps to be entered onto the
 * list while it is running, flattening out the operation and avoiding
 * a deep kernel stack recursion.
 */
void
unp_dispose(struct mbuf *m)
{
	lwkt_gettoken(&unp_token);
	if (m)
		unp_scan(m, unp_discard, NULL);
	lwkt_reltoken(&unp_token);
}

static int
unp_listen(struct unpcb *unp, struct thread *td)
{
	struct proc *p = td->td_proc;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	KKASSERT(p);
	cru2x(p->p_ucred, &unp->unp_peercred);
	unp_setflags(unp, UNP_HAVEPCCACHED);
	return (0);
}

static void
unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_LEN(0)) /
				    sizeof(void *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++, data);
				break;	/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

/*
 * Mark visibility.  info->defer is recalculated on every pass.
 */
static void
unp_mark(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if ((fp->f_flag & FMARK) == 0) {
		++info->defer;
		atomic_set_int(&fp->f_flag, FMARK | FDEFER);
	} else if (fp->f_flag & FDEFER) {
		++info->defer;
	}
}

/*
 * Discard a fp previously held in a unix domain socket mbuf.  To
 * avoid blowing out the kernel stack due to contrived chain-reactions
 * we may have to defer the operation to a higher procedural level.
 *
 * Caller holds unp_token
 */
static void
unp_discard(struct file *fp, void *data __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);

	d = kmalloc(sizeof(*d), M_UNPCB, M_WAITOK);
	d->fp = fp;

	spin_lock(&unp_defdiscard_spin);
	SLIST_INSERT_HEAD(&unp_defdiscard_head, d, next);
	spin_unlock(&unp_defdiscard_spin);

	taskqueue_enqueue(unp_taskqueue, &unp_defdiscard_task);
}

static int
unp_find_lockref(struct sockaddr *nam, struct thread *td, short type,
    struct unpcb **unp_ret)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp = NULL;
	struct socket *so;
	struct unpcb *unp;
	int error, len;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	*unp_ret = NULL;

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0) {
		error = EINVAL;
		goto failed;
	}
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error) {
		vp = NULL;
		goto failed;
	}

	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto failed;
	}
	error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
	if (error)
		goto failed;
	so = vp->v_socket;
	if (so == NULL) {
		error = ECONNREFUSED;
		goto failed;
	}
	if (so->so_type != type) {
		error = EPROTOTYPE;
		goto failed;
	}

	/* Lock this unp. */
	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		unp_reltoken(unp);
		error = ECONNREFUSED;
		goto failed;
	}
	/* And keep this unp referenced. */
	unp_reference(unp);

	/* Done! */
	*unp_ret = unp;
	error = 0;
failed:
	if (vp != NULL)
		vput(vp);
	return error;
}

static int
unp_connect_pair(struct unpcb *unp, struct unpcb *unp2)
{
	struct socket *so = unp->unp_socket;
	struct socket *so2 = unp2->unp_socket;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	UNP_ASSERT_TOKEN_HELD(unp2);

	KASSERT(so->so_type == so2->so_type,
	    ("socket type mismatch, so %d, so2 %d", so->so_type, so2->so_type));

	if (!UNP_ISATTACHED(unp))
		return EINVAL;
	if (!UNP_ISATTACHED(unp2))
		return ECONNREFUSED;

	KASSERT(unp->unp_conn == NULL, ("unp is already connected"));
	unp->unp_conn = unp2;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		KASSERT(unp2->unp_conn == NULL, ("unp2 is already connected"));
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect_pair: unknown socket type %d", so->so_type);
	}
	return 0;
}

static void
unp_drop(struct unpcb *unp, int error)
{
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	KASSERT(unp->unp_flags & UNP_DETACHED, ("unp is not detached"));

	unp_disconnect(unp, error);

	while ((unp2 = LIST_FIRST(&unp->unp_refs)) != NULL) {
		lwkt_getpooltoken(unp2);
		unp_disconnect(unp2, ECONNRESET);
		lwkt_relpooltoken(unp2);
	}
	unp_setflags(unp, UNP_DROPPED);
}

static void
unp_defdiscard_taskfunc(void *arg __unused, int pending __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_defdiscard_spin);
	while ((d = SLIST_FIRST(&unp_defdiscard_head)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_defdiscard_head, next);
		spin_unlock(&unp_defdiscard_spin);

		closef(d->fp, NULL);
		kfree(d, M_UNPCB);

		spin_lock(&unp_defdiscard_spin);
	}
	spin_unlock(&unp_defdiscard_spin);
}