/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");

/*
 * Abort a socket and free it.  Called from soabort() only.  soabort()
 * got a ref on the socket which we must free on reply.
 */
void
so_pru_abort(struct socket *so)
{
	struct netmsg_pru_abort msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	(void)lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	sofree(msg.base.nm_so);
}

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soaborta() only.  soaborta() got a ref on the socket which we must
 * free on reply.
 */
void
so_pru_aborta(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
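/*
 * Illustrative sketch only (not part of the original file): the general
 * shape shared by the synchronous so_pru_*() wrappers in this file.  A
 * netmsg is initialized with the caller's reply port and the protocol
 * handler, then dispatched to the socket's protocol thread with
 * lwkt_domsg(), which blocks until the handler replies.
 * "example_handler" is a hypothetical stand-in for a pru_* handler.
 */
#if 0
static int
so_pru_example(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0, example_handler);
	return (lwkt_domsg(so->so_port, &msg.lmsg, 0));
}
#endif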
/*
 * Abort a socket and free it.  Called from soabort_oncpu() only.
 * Caller must make sure that the current CPU is inpcb's owner CPU.
 */
void
so_pru_abort_oncpu(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}

int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return (msg.base.lmsg.ms_error);
}
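/*
 * Note on the direct-dispatch pattern used by so_pru_abort_oncpu() and
 * so_pru_attach_direct() above (and the other *_direct() functions
 * below): the handler is invoked in the calling thread instead of being
 * sent to so_port.  Clearing MSGF_REPLY|MSGF_DONE re-arms the embedded
 * lwkt_msg so it appears freshly sent, and MSGF_SYNC marks it as a
 * synchronous self-dispatch; the handler's reply to netisr_adone_rport
 * then just sets MSGF_DONE, letting the caller read ms_error from the
 * on-stack message.
 */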
/*
 * NOTE: If the target port changes the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_sndflags = 0;
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG, M_NOWAIT);
	if (msg == NULL) {
		/*
		 * Failed to allocate the address without blocking;
		 * fall back to the synchronous pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}
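/*
 * Sketch of the trailing-payload idiom used by so_pru_connect_async()
 * above (illustration only; "msg" and "nam" mirror the names above).
 * The netmsg and the variable-length sockaddr are carved from a single
 * kmalloc() so that the free-on-reply port (netisr_afree_rport) can
 * release both with one kfree() once the async handler replies.
 */
#if 0
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG, M_NOWAIT);
	msg->nm_nam = (struct sockaddr *)(msg + 1);	/* payload follows header */
	memcpy(msg->nm_nam, nam, nam->sa_len);		/* caller's nam may go away */
#endif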
int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
		    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 * copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return (msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  The spinlock interaction here is delicate; use the
	 * staged sendmsg to avoid deadlocking.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_stage1(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			lwkt_sendmsg_stage2(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}
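/*
 * Why so_pru_rcvd_async() stages the send (explanatory note on the code
 * above, under the assumption that lwkt_sendmsg_stage2() is the half
 * that signals the target port): stage 1 only queues the message and is
 * safe under so_rcvd_spin, while stage 2 runs after the unlock so the
 * wakeup cannot collide with the protocol side taking so_rcvd_spin in
 * so_async_rcvd_reply().
 */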
int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * NOTE: If the target port changes the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_NOWAIT);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address without blocking;
			 * fall back to the synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
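/*
 * Note on so_pru_send_async() above: no netmsg is allocated at all; the
 * message is embedded in the mbuf header (m->m_hdr.mh_sndmsg), so the
 * async send costs no extra allocation and the message storage is
 * reclaimed along with the mbuf.  PRUS_NOREPLY tells the handler not to
 * reply (the reply port is netisr_apanic_rport, which is presumably
 * never meant to see a reply), and PRUS_FREEADDR makes the handler free
 * the copied address.
 */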
int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pru_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pru_ctlinput msg;
	lwkt_port_t port;

	if (pr->pr_ctlport == NULL)
		return;
	KKASSERT(pr->pr_ctlinput != NULL);
	port = pr->pr_ctlport(cmd, arg, extra);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct lwkt_token *tok;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ?
	    &msg->base.nm_so->so_rcv :
	    &msg->base.nm_so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue the
	 * request.
	 *
	 * NOTE: Socket can change if this is an accept predicate so cache
	 *	 the token.
	 */
	tok = lwkt_token_pool_lookup(msg->base.nm_so);
	lwkt_gettoken(tok);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, 'tok' will be released, so
		 * the SSB_MEVENT set beforehand could have been cleared by
		 * the time we get here.  In case that happens, we set
		 * SSB_MEVENT again after the notify has been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
	}
}
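/*
 * Hypothetical sketch (not from this file): the shape of a predicate
 * handed to netmsg_so_notify().  It is called with the queued notify
 * message and returns non-zero once the awaited event has occurred.
 * The function name and the ssb_cc test below are assumptions made for
 * illustration only.
 */
#if 0
static boolean_t
example_read_predicate(struct netmsg_so_notify *msg)
{
	struct socket *so = msg->base.nm_so;

	/* Readable: is any data buffered in the receive sockbuf? */
	return (so->so_rcv.ssb_cc > 0);
}
#endif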
/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
			    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
		    &nmsg->base.nm_so->so_rcv :
		    &nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, the reply is routed to the degenerate
	 * lwkt_null_replyport().
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}

void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe, the drop is handled by the degenerate
	 * lwkt_spin_dropmsg().
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}