/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>
static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "Number of asynchronous pru_rcvd message drop races");

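/*
 * The so_pru_*() wrappers below funnel protocol requests to the
 * socket's protocol thread via its message port (so->so_port).
 * Three dispatch flavors are used:
 *
 * - Synchronous: lwkt_domsg() blocks until the protocol thread
 *   replies to the message.
 * - Direct (*_direct): the pru handler is called in the caller's
 *   context with a stack-based message marked MSGF_SYNC; the caller
 *   must already be running in the appropriate protocol thread.
 * - Asynchronous (*_async, *_fast): lwkt_sendmsg() does not block;
 *   the message is heap-allocated (or embedded in a larger object)
 *   and its reply port disposes of it when the protocol replies.
 */
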
/*
 * Abort a socket and free it, asynchronously.  Called from
 * soabort_async() only.  soabort_async() got a ref on the
 * socket which we must free on reply.
 */
void
so_pru_abort_async(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it.  Called from soabort_direct() only.
 * Caller must make sure that the current CPU is the inpcb's owner CPU.
 * soabort_direct() got a ref on the socket which we must free.
 */
void
so_pru_abort_direct(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}

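/*
 * Accept a pending connection, synchronously dispatched to the
 * socket's protocol thread.  The peer address is returned via *nam.
 */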
int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

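/*
 * Attach a protocol control block to the socket, synchronously.
 */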
int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

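/*
 * Direct variant of so_pru_attach(): runs pru_attach synchronously
 * in the caller's context using a stack-based message marked
 * MSGF_SYNC instead of forwarding it to the protocol thread.
 */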
int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

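/*
 * Fast attach: run pru_preattach synchronously, then dispatch the
 * full pru_attach asynchronously (nm_ai is NULL to indicate the
 * postattach phase).  Falls back to a synchronous pru_attach if the
 * message cannot be allocated.
 */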
int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach *msg;
	int error;

	error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
	if (error)
		return error;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * a synchronous pru_attach.
		 */
		return so_pru_attach(so, proto, NULL /* postattach */);
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_attach);
	msg->nm_proto = proto;
	msg->nm_ai = NULL; /* postattach */
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);

	return 0;
}

/*
 * NOTE: If the target port changes the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_sndflags = 0;
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

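/*
 * Asynchronous connect.  pru_preconnect validates the request in the
 * caller's context; the connect message, with the sockaddr copied in
 * directly behind it, is then sent to the protocol thread and freed
 * by the reply port.  Falls back to a synchronous connect if the
 * message cannot be allocated.
 */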
int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
	    M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Failed to allocate the message; fall back to
		 * a synchronous pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}

int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
		    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 *	     copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
		      struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

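/*
 * Direct variant of so_pru_detach(): runs pru_detach synchronously
 * in the caller's context.
 */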
int
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	msg.nm_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

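/*
 * Notify the protocol that the receiver has consumed data,
 * synchronously.
 */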
int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

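/*
 * Asynchronous version of so_pru_rcvd() which reuses the netmsg
 * embedded in the socket (so_rcvd_msg), so at most one notification
 * is ever in flight.  If the previous message has not completed yet
 * (MSGF_DONE clear) it is not resent; the in-flight message covers
 * the notification.  Nothing is sent once PRUR_DEAD is set.
 */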
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  Spinlock is a bit dodgy, use hacked up sendmsg
	 *	     to avoid deadlocking.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_prepare(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			lwkt_sendmsg_start(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * NOTE: If the target port changes the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

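/*
 * Synchronize with the socket's protocol thread by sending a no-op
 * message (netmsg_sync_handler simply replies) and waiting for the
 * reply; any messages queued ahead of it will have been processed.
 */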
void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

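/*
 * Asynchronous pru_send.  The netmsg embedded in the mbuf header
 * (m_hdr.mh_sndmsg) is used, so no allocation is needed for the
 * message itself.  Any sockaddr is copied and flagged PRUS_FREEADDR
 * so the protocol side frees it.  PRUS_NOREPLY tells the protocol
 * not to reply; netisr_apanic_rport enforces this.  If the sockaddr
 * copy cannot be allocated we fall back to a synchronous pru_send.
 */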
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address copy; fall back
			 * to a synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

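/*
 * Return the protocol's control message port for the given command,
 * or NULL if the protocol does not support control input.
 */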
struct lwkt_port *
so_pr_ctlport(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra, int *cpuid)
{
	if (pr->pr_ctlport == NULL)
		return NULL;
	KKASSERT(pr->pr_ctlinput != NULL);

	return pr->pr_ctlport(cmd, arg, extra, cpuid);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pr_ctlinput msg;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_direct = 0;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}

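/*
 * Direct variant of so_pr_ctlinput(): runs pr_ctlinput synchronously
 * in the current thread, but only if this cpu owns the target
 * (a returned cpuid of ncpus appears to act as a wildcard).
 */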
void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
	struct netmsg_pr_ctlinput msg;
	netisr_fn_t func;
	lwkt_port_t port;
	int cpuid;

	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	if (cpuid != ncpus && cpuid != mycpuid)
		return;

	func = pr->pr_ctlinput;
	netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_direct = 1;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct lwkt_token *tok;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ?
			&msg->base.nm_so->so_rcv :
			&msg->base.nm_so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue the
	 * request.
	 *
	 * NOTE: Socket can change if this is an accept predicate so cache
	 *	 the token.
	 */
	tok = lwkt_token_pool_lookup(msg->base.nm_so);
	lwkt_gettoken(tok);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, 'tok' will be released, so
		 * the SSB_MEVENT set beforehand may have been cleared by
		 * the time we get here.  In case that happens, we set
		 * SSB_MEVENT again, after the notify has been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
	}
}


/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
			    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
				&nmsg->base.nm_so->so_rcv :
				&nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}

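/*
 * Tear down the socket's embedded async rcvd message.  Setting
 * PRUR_DEAD prevents so_pru_rcvd_async() from requeueing it; we then
 * loop dropping the message until it is marked done, counting any
 * races via the async_rcvd_drop_race sysctl.
 */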
void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe, drop runs to degenerate lwkt_spin_dropmsg()
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}