/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() got a ref on the
 * socket which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it.  Called from soabort_direct() only.
 * Caller must make sure that the current CPU is the inpcb's owner CPU.
 * soabort_direct() got a ref on the socket which we must free.
 */
void
so_pru_abort_direct(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}
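
/*
 * Illustrative sketch (hypothetical, compiled out): every *_direct
 * variant in this file shares the dispatch idiom below.  The netmsg is
 * initialized against netisr_adone_rport, MSGF_REPLY and MSGF_DONE are
 * cleared and MSGF_SYNC is set so the message looks like an ordinary
 * in-flight synchronous message, then the protocol handler is invoked
 * directly on the current CPU instead of being queued to the protocol
 * thread.  The handler's reply against the adone port marks the message
 * done, which the KKASSERT verifies.
 */
#if 0
static int
example_direct_dispatch(struct socket *so, netisr_fn_t func)
{
	struct netmsg_pru_detach msg;	/* detach shown; same pattern for
					 * the other pru message types */

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);		/* runs to completion, no queueing */
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return (msg.base.lmsg.ms_error);
}
#endif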

int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach *msg;
	int error;

	error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
	if (error)
		return error;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_attach.
		 */
		return so_pru_attach(so, proto, NULL /* postattach */);
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_attach);
	msg->nm_proto = proto;
	msg->nm_ai = NULL; /* postattach */
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);

	return 0;
}
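
/*
 * so_pru_attach_fast() shows the allocation pattern used by the
 * asynchronous entry points in this file: kmalloc() is called with
 * M_WAITOK | M_NULLOK, so it may block but can still return NULL under
 * extreme memory pressure, in which case we fall back to the synchronous
 * message path instead of failing outright.  The message targets
 * netisr_afree_rport, whose reply handler frees the message, so a
 * successfully sent message needs no further cleanup here.
 */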

/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_sndflags = 0;
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
	    M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * synchronous pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}
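
/*
 * Note on the layout trick above: the sockaddr copy is carved out of the
 * same kmalloc() allocation as the netmsg itself (it begins at msg + 1),
 * so the single kfree() done by netisr_afree_rport on reply releases
 * both the message and the address.
 */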

int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
		    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 *	     copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
		      struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  The spinlock interaction is a bit dodgy; use the
	 *	     split prepare/start sendmsg to avoid deadlocking.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_prepare(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			if (so->so_port == netisr_curport())
				lwkt_sendmsg_start_oncpu(so->so_port, lmsg);
			else
				lwkt_sendmsg_start(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}
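
/*
 * so_pru_rcvd_async() reuses the netmsg embedded in the socket
 * (so->so_rcvd_msg) instead of allocating one per call, so at most one
 * async pru_rcvd can be in flight per socket; the MSGF_DONE test above
 * gates resends.  The send is split into lwkt_sendmsg_prepare() and
 * lwkt_sendmsg_start() so that so_rcvd_spin can be released before the
 * message is actually queued, avoiding a deadlock against the target
 * port.  The reply and teardown sides are so_async_rcvd_reply() and
 * so_async_rcvd_drop() at the end of this file.
 */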

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address; fall back to
			 * synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &msg->base.lmsg);
	else
		lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
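
/*
 * The asynchronous send requires no allocation at all: the netmsg used
 * above is embedded in the mbuf header (m->m_hdr.mh_sndmsg).  PRUS_NOREPLY
 * tells the protocol handler not to reply, which is why the message is
 * initialized against netisr_apanic_rport; an unexpected reply would
 * panic rather than silently corrupt the mbuf.
 */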

int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

	if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
		struct netmsg_pr_ctloutput *amsg;

		/* Fast path: asynchronous pr_ctloutput */
		amsg = so->so_proto->pr_ctloutmsg(sopt);
		if (amsg != NULL) {
			netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
			    so->so_proto->pr_ctloutput);
			/* nm_flags and nm_sopt are set up by pr_ctloutmsg */
			if (so->so_port == netisr_curport()) {
				lwkt_sendmsg_oncpu(so->so_port,
				    &amsg->base.lmsg);
			} else {
				lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
			}
			return 0;
		}
		/* FALLTHROUGH */
	}

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_flags = 0;
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

struct lwkt_port *
so_pr_ctlport(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra, int *cpuid)
{
	if (pr->pr_ctlport == NULL)
		return NULL;
	KKASSERT(pr->pr_ctlinput != NULL);

	return pr->pr_ctlport(cmd, arg, extra, cpuid);
}

/*
 * Protocol control input, typically via ICMP.
 *
 * If the protocol's pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return; otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pr_ctlinput msg;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_direct = 0;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}

void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
	struct netmsg_pr_ctlinput msg;
	netisr_fn_t func;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
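	/*
	 * Run the control input directly only when it targets the current
	 * CPU or is not bound to a specific CPU (cpuid == netisr_ncpus);
	 * otherwise drop it here rather than run on the wrong netisr.
	 */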
	if (cpuid != netisr_ncpus && cpuid != mycpuid)
		return;

	func = pr->pr_ctlinput;
	netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_direct = 1;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ? &so->so_rcv : &so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue the
	 * request.
	 *
	 * NOTE: The socket can change if this is an accept predicate, so
	 *	 cache the token.
	 */
	lwkt_getpooltoken(so);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_mlist, &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, the pool token will be
		 * released, so the SSB_MEVENT set beforehand could have
		 * been cleared by the time we reach here.  In case that
		 * happens, we set SSB_MEVENT again after the notify has
		 * been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}
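
/*
 * Illustrative sketch (hypothetical, compiled out): the general shape of
 * a predicate used with netmsg_so_notify().  A predicate examines socket
 * state and returns non-zero once the awaited event has occurred; this
 * example would treat a receive event as ready when any data is buffered.
 */
#if 0
static boolean_t
example_rcv_predicate(struct netmsg_so_notify *nmsg)
{
	struct socket *so = nmsg->base.nm_so;

	return (so->so_rcv.ssb_cc > 0);
}
#endif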

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
			    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
				&nmsg->base.nm_so->so_rcv :
				&nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}

void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe, drop runs to degenerate lwkt_spin_dropmsg()
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
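	/*
	 * lwkt_dropmsg() can only remove the message while it is still
	 * queued on the target port; if the netisr has already dequeued
	 * it, MSGF_DONE stays clear until the reply arrives, so sleep a
	 * tick and retry.  Each such race is counted in the
	 * kern.ipc.async_rcvd_drop_race sysctl.
	 */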
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}