xref: /dragonfly/sys/kern/uipc_socket.c (revision 4d0c54c1)
1 /*
2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Jeffrey M. Hsu.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of The DragonFly Project nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific, prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1982, 1986, 1988, 1990, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
67  * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
68  */
69 
70 #include "opt_inet.h"
71 #include "opt_sctp.h"
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/fcntl.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/domain.h>
79 #include <sys/file.h>			/* for struct knote */
80 #include <sys/kernel.h>
81 #include <sys/event.h>
82 #include <sys/proc.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/socketops.h>
87 #include <sys/resourcevar.h>
88 #include <sys/signalvar.h>
89 #include <sys/sysctl.h>
90 #include <sys/uio.h>
91 #include <sys/jail.h>
92 #include <vm/vm_zone.h>
93 #include <vm/pmap.h>
94 #include <net/netmsg2.h>
95 
96 #include <sys/thread2.h>
97 #include <sys/socketvar2.h>
98 #include <sys/spinlock2.h>
99 
100 #include <machine/limits.h>
101 
102 extern int tcp_sosend_agglim;
103 extern int tcp_sosend_async;
104 extern int udp_sosend_async;
105 extern int udp_sosend_prepend;
106 
107 #ifdef INET
108 static int	 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
109 #endif /* INET */
110 
111 static void 	filt_sordetach(struct knote *kn);
112 static int 	filt_soread(struct knote *kn, long hint);
113 static void 	filt_sowdetach(struct knote *kn);
114 static int	filt_sowrite(struct knote *kn, long hint);
115 static int	filt_solisten(struct knote *kn, long hint);
116 
117 static void	sodiscard(struct socket *so);
118 static int	soclose_sync(struct socket *so, int fflag);
119 static void	soclose_fast(struct socket *so);
120 
121 static struct filterops solisten_filtops =
122 	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
123 static struct filterops soread_filtops =
124 	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
125 static struct filterops sowrite_filtops =
126 	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
127 static struct filterops soexcept_filtops =
128 	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
129 
130 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
131 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
132 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
133 
134 
135 static int somaxconn = SOMAXCONN;
136 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
137     &somaxconn, 0, "Maximum pending socket connection queue size");
138 
139 static int use_soclose_fast = 1;
140 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
141     &use_soclose_fast, 0, "Fast socket close");
142 
143 int use_soaccept_pred_fast = 1;
144 SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
145     &use_soaccept_pred_fast, 0, "Fast socket accept prediction");
146 
147 int use_sendfile_async = 1;
148 SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
149     &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");
150 
151 /*
152  * Socket operation routines.
153  * These routines are called by the routines in
154  * sys_socket.c or from a system process, and
155  * implement the semantics of socket operations by
156  * switching out to the protocol specific routines.
157  */
158 
159 /*
160  * Get a socket structure, and initialize it.
161  * Note that it would probably be better to allocate socket
162  * and PCB at the same time, but I'm not convinced that all
163  * the protocols can be easily modified to do this.
164  */
165 struct socket *
166 soalloc(int waitok, struct protosw *pr)
167 {
168 	struct socket *so;
169 	unsigned waitmask;
170 
171 	waitmask = waitok ? M_WAITOK : M_NOWAIT;
172 	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
173 	if (so) {
174 		/* XXX race condition for reentrant kernel */
175 		so->so_proto = pr;
176 		TAILQ_INIT(&so->so_aiojobq);
177 		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
178 		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
179 		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
180 		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
181 		spin_init(&so->so_rcvd_spin);
182 		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
183 		    MSGF_DROPABLE, so->so_proto->pr_usrreqs->pru_rcvd);
184 		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
185 		so->so_state = SS_NOFDREF;
186 		so->so_refs = 1;
187 	}
188 	return so;
189 }
190 
191 int
192 socreate(int dom, struct socket **aso, int type,
193 	int proto, struct thread *td)
194 {
195 	struct proc *p = td->td_proc;
196 	struct protosw *prp;
197 	struct socket *so;
198 	struct pru_attach_info ai;
199 	int error;
200 
201 	if (proto)
202 		prp = pffindproto(dom, proto, type);
203 	else
204 		prp = pffindtype(dom, type);
205 
206 	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
207 		return (EPROTONOSUPPORT);
208 
209 	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
210 	    prp->pr_domain->dom_family != PF_LOCAL &&
211 	    prp->pr_domain->dom_family != PF_INET &&
212 	    prp->pr_domain->dom_family != PF_INET6 &&
213 	    prp->pr_domain->dom_family != PF_ROUTE) {
214 		return (EPROTONOSUPPORT);
215 	}
216 
217 	if (prp->pr_type != type)
218 		return (EPROTOTYPE);
219 	so = soalloc(p != NULL, prp);
220 	if (so == NULL)
221 		return (ENOBUFS);
222 
223 	/*
224 	 * Callers of socreate() presumably will connect up a descriptor
225 	 * and call soclose() if they cannot.  This represents our so_refs
226 	 * (which should be 1) from soalloc().
227 	 */
228 	soclrstate(so, SS_NOFDREF);
229 
230 	/*
231 	 * Set a default port for protocol processing.  No action will occur
232 	 * on the socket on this port until an inpcb is attached to it and
233 	 * is able to match incoming packets, or until the socket becomes
234 	 * available to userland.
235 	 *
236 	 * We normally default the socket to the protocol thread on cpu 0.
237 	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
238 	 * thread and all pr_*()/pru_*() calls are executed synchronously.
239 	 */
240 	if (prp->pr_flags & PR_SYNC_PORT)
241 		so->so_port = &netisr_sync_port;
242 	else
243 		so->so_port = netisr_portfn(0);
244 
245 	TAILQ_INIT(&so->so_incomp);
246 	TAILQ_INIT(&so->so_comp);
247 	so->so_type = type;
248 	so->so_cred = crhold(p->p_ucred);
249 	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
250 	ai.p_ucred = p->p_ucred;
251 	ai.fd_rdir = p->p_fd->fd_rdir;
252 
253 	/*
254 	 * Auto-sizing of socket buffers is managed by the protocols and
255 	 * the appropriate flags must be set in the pru_attach function.
256 	 */
257 	error = so_pru_attach(so, proto, &ai);
258 	if (error) {
259 		sosetstate(so, SS_NOFDREF);
260 		sofree(so);	/* from soalloc */
261 		return error;
262 	}
263 
264 	/*
265 	 * NOTE: Returns referenced socket.
266 	 */
267 	*aso = so;
268 	return (0);
269 }
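/*
 * Usage sketch (illustrative, not part of the original source): an
 * in-kernel caller pairs socreate() with soclose(), e.g.:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 *	if (error == 0) {
 *		... sobind()/soconnect()/sosend() as needed ...
 *		soclose(so, FNONBLOCK);
 *	}
 *
 * On success the returned socket holds one reference with SS_NOFDREF
 * clear; the caller owns that reference and must release it via
 * soclose().
 */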
270 
271 int
272 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
273 {
274 	int error;
275 
276 	error = so_pru_bind(so, nam, td);
277 	return (error);
278 }
279 
280 static void
281 sodealloc(struct socket *so)
282 {
283 	if (so->so_rcv.ssb_hiwat)
284 		(void)chgsbsize(so->so_cred->cr_uidinfo,
285 		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
286 	if (so->so_snd.ssb_hiwat)
287 		(void)chgsbsize(so->so_cred->cr_uidinfo,
288 		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
289 #ifdef INET
290 	/* remove accept filter if present */
291 	if (so->so_accf != NULL)
292 		do_setopt_accept_filter(so, NULL);
293 #endif /* INET */
294 	crfree(so->so_cred);
295 	if (so->so_faddr != NULL)
296 		kfree(so->so_faddr, M_SONAME);
297 	kfree(so, M_SOCKET);
298 }
299 
300 int
301 solisten(struct socket *so, int backlog, struct thread *td)
302 {
303 	int error;
304 #ifdef SCTP
305 	short oldopt, oldqlimit;
306 #endif /* SCTP */
307 
308 	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
309 		return (EINVAL);
310 
311 #ifdef SCTP
312 	oldopt = so->so_options;
313 	oldqlimit = so->so_qlimit;
314 #endif /* SCTP */
315 
316 	lwkt_gettoken(&so->so_rcv.ssb_token);
317 	if (TAILQ_EMPTY(&so->so_comp))
318 		so->so_options |= SO_ACCEPTCONN;
319 	lwkt_reltoken(&so->so_rcv.ssb_token);
320 	if (backlog < 0 || backlog > somaxconn)
321 		backlog = somaxconn;
322 	so->so_qlimit = backlog;
323 	/* SCTP needs to tweak both the inbound backlog parameter AND
324 	 * the so_options (in the UDP model a socket both connects and
325 	 * accepts inbound connections, implicitly).
326 	 */
327 	error = so_pru_listen(so, td);
328 	if (error) {
329 #ifdef SCTP
330 		/* Restore the params */
331 		so->so_options = oldopt;
332 		so->so_qlimit = oldqlimit;
333 #endif /* SCTP */
334 		return (error);
335 	}
336 	return (0);
337 }
338 
339 /*
340  * Destroy a disconnected socket.  This routine is a NOP if entities
341  * still have a reference on the socket:
342  *
343  *	so_pcb -	The protocol stack still has a reference
344  *	SS_NOFDREF -	There is no longer a file pointer reference
345  */
346 void
347 sofree(struct socket *so)
348 {
349 	struct socket *head;
350 
351 	/*
352 	 * This is a bit hackish at the moment.  We need to interlock
353 	 * any accept queue we are on before we potentially lose the
354 	 * last reference to avoid races against a re-reference from
355 	 * someone operating on the queue.
356 	 */
357 	while ((head = so->so_head) != NULL) {
358 		lwkt_getpooltoken(head);
359 		if (so->so_head == head)
360 			break;
361 		lwkt_relpooltoken(head);
362 	}
363 
364 	/*
365 	 * Arbitrate the last free.
366 	 */
367 	KKASSERT(so->so_refs > 0);
368 	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
369 		if (head)
370 			lwkt_relpooltoken(head);
371 		return;
372 	}
373 
374 	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
375 	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);
376 
377 	/*
378 	 * We're done, remove ourselves from the accept queue we are
379 	 * on, if we are on one.
380 	 */
381 	if (head != NULL) {
382 		if (so->so_state & SS_INCOMP) {
383 			TAILQ_REMOVE(&head->so_incomp, so, so_list);
384 			head->so_incqlen--;
385 		} else if (so->so_state & SS_COMP) {
386 			/*
387 			 * We must not decommission a socket that's
388 			 * on the accept(2) queue.  If we do, then
389 			 * accept(2) may hang after select(2) indicated
390 			 * that the listening socket was ready.
391 			 */
392 			lwkt_relpooltoken(head);
393 			return;
394 		} else {
395 			panic("sofree: not queued");
396 		}
397 		soclrstate(so, SS_INCOMP);
398 		so->so_head = NULL;
399 		lwkt_relpooltoken(head);
400 	}
401 	ssb_release(&so->so_snd, so);
402 	sorflush(so);
403 	sodealloc(so);
404 }
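/*
 * Reference-count sketch (illustrative): sofree() is the release half
 * of soreference().  Code that must hold the socket across a blocking
 * operation effectively does:
 *
 *	soreference(so);	increments so_refs
 *	... block, or hand so to another thread via a netmsg ...
 *	sofree(so);		drops the ref; actual teardown happens
 *				only on the last ref, and only once
 *				so_pcb is NULL and SS_NOFDREF is set
 *
 * The pool token on so_head is acquired first so that dropping the
 * last reference cannot race a re-reference by code walking the
 * accept queue.
 */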
405 
406 /*
407  * Close a socket on last file table reference removal.
408  * Initiate disconnect if connected.
409  * Free socket when disconnect complete.
410  */
411 int
412 soclose(struct socket *so, int fflag)
413 {
414 	int error;
415 
416 	funsetown(&so->so_sigio);
417 	if (!use_soclose_fast ||
418 	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
419 	    (so->so_options & SO_LINGER)) {
420 		error = soclose_sync(so, fflag);
421 	} else {
422 		soclose_fast(so);
423 		error = 0;
424 	}
425 	return error;
426 }
427 
428 static void
429 sodiscard(struct socket *so)
430 {
431 	lwkt_getpooltoken(so);
432 	if (so->so_options & SO_ACCEPTCONN) {
433 		struct socket *sp;
434 
435 		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
436 			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
437 			soclrstate(sp, SS_INCOMP);
438 			sp->so_head = NULL;
439 			so->so_incqlen--;
440 			soaborta(sp);
441 		}
442 		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
443 			TAILQ_REMOVE(&so->so_comp, sp, so_list);
444 			soclrstate(sp, SS_COMP);
445 			sp->so_head = NULL;
446 			so->so_qlen--;
447 			soaborta(sp);
448 		}
449 	}
450 	lwkt_relpooltoken(so);
451 
452 	if (so->so_state & SS_NOFDREF)
453 		panic("soclose: NOFDREF");
454 	sosetstate(so, SS_NOFDREF);	/* take ref */
455 }
456 
457 static int
458 soclose_sync(struct socket *so, int fflag)
459 {
460 	int error = 0;
461 
462 	if (so->so_pcb == NULL)
463 		goto discard;
464 	if (so->so_state & SS_ISCONNECTED) {
465 		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
466 			error = sodisconnect(so);
467 			if (error)
468 				goto drop;
469 		}
470 		if (so->so_options & SO_LINGER) {
471 			if ((so->so_state & SS_ISDISCONNECTING) &&
472 			    (fflag & FNONBLOCK))
473 				goto drop;
474 			while (so->so_state & SS_ISCONNECTED) {
475 				error = tsleep(&so->so_timeo, PCATCH,
476 					       "soclos", so->so_linger * hz);
477 				if (error)
478 					break;
479 			}
480 		}
481 	}
482 drop:
483 	if (so->so_pcb) {
484 		int error2;
485 
486 		error2 = so_pru_detach(so);
487 		if (error == 0)
488 			error = error2;
489 	}
490 discard:
491 	sodiscard(so);
492 	so_pru_sync(so);	/* unpend async sending */
493 	sofree(so);		/* dispose of ref */
494 
495 	return (error);
496 }
497 
498 static void
499 soclose_sofree_async_handler(netmsg_t msg)
500 {
501 	sofree(msg->base.nm_so);
502 }
503 
504 static void
505 soclose_sofree_async(struct socket *so)
506 {
507 	struct netmsg_base *base = &so->so_clomsg;
508 
509 	netmsg_init(base, so, &netisr_apanic_rport, 0,
510 	    soclose_sofree_async_handler);
511 	lwkt_sendmsg(so->so_port, &base->lmsg);
512 }
513 
514 static void
515 soclose_disconn_async_handler(netmsg_t msg)
516 {
517 	struct socket *so = msg->base.nm_so;
518 
519 	if ((so->so_state & SS_ISCONNECTED) &&
520 	    (so->so_state & SS_ISDISCONNECTING) == 0)
521 		so_pru_disconnect_direct(so);
522 
523 	if (so->so_pcb)
524 		so_pru_detach_direct(so);
525 
526 	sodiscard(so);
527 	sofree(so);
528 }
529 
530 static void
531 soclose_disconn_async(struct socket *so)
532 {
533 	struct netmsg_base *base = &so->so_clomsg;
534 
535 	netmsg_init(base, so, &netisr_apanic_rport, 0,
536 	    soclose_disconn_async_handler);
537 	lwkt_sendmsg(so->so_port, &base->lmsg);
538 }
539 
540 static void
541 soclose_detach_async_handler(netmsg_t msg)
542 {
543 	struct socket *so = msg->base.nm_so;
544 
545 	if (so->so_pcb)
546 		so_pru_detach_direct(so);
547 
548 	sodiscard(so);
549 	sofree(so);
550 }
551 
552 static void
553 soclose_detach_async(struct socket *so)
554 {
555 	struct netmsg_base *base = &so->so_clomsg;
556 
557 	netmsg_init(base, so, &netisr_apanic_rport, 0,
558 	    soclose_detach_async_handler);
559 	lwkt_sendmsg(so->so_port, &base->lmsg);
560 }
561 
562 static void
563 soclose_fast(struct socket *so)
564 {
565 	if (so->so_pcb == NULL)
566 		goto discard;
567 
568 	if ((so->so_state & SS_ISCONNECTED) &&
569 	    (so->so_state & SS_ISDISCONNECTING) == 0) {
570 		soclose_disconn_async(so);
571 		return;
572 	}
573 
574 	if (so->so_pcb) {
575 		soclose_detach_async(so);
576 		return;
577 	}
578 
579 discard:
580 	sodiscard(so);
581 	soclose_sofree_async(so);
582 }
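/*
 * Decision summary for the fast-close path above (illustrative):
 *
 *	so_pcb == NULL                -> sodiscard() now, sofree() via
 *	                                 netmsg in the protocol thread
 *	connected && !disconnecting   -> disconnect + detach + discard
 *	                                 in the protocol thread
 *	so_pcb != NULL otherwise      -> detach + discard in the
 *	                                 protocol thread
 *
 * In every case the final sofree() runs in the socket's protocol
 * thread (so_port), serializing teardown against in-flight protocol
 * processing without making the caller wait.
 */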
583 
584 /*
585  * Abort and destroy a socket.  Only one abort can be in progress
586  * at any given moment.
587  */
588 void
589 soabort(struct socket *so)
590 {
591 	soreference(so);
592 	so_pru_abort(so);
593 }
594 
595 void
596 soaborta(struct socket *so)
597 {
598 	soreference(so);
599 	so_pru_aborta(so);
600 }
601 
602 void
603 soabort_oncpu(struct socket *so)
604 {
605 	soreference(so);
606 	so_pru_abort_oncpu(so);
607 }
608 
609 /*
610  * The socket is passed in ref'd; that reference becomes
611  * owned by the cleared SS_NOFDREF flag.
612  */
613 void
614 soaccept_generic(struct socket *so)
615 {
616 	if ((so->so_state & SS_NOFDREF) == 0)
617 		panic("soaccept: !NOFDREF");
618 	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
619 }
620 
621 int
622 soaccept(struct socket *so, struct sockaddr **nam)
623 {
624 	int error;
625 
626 	soaccept_generic(so);
627 	error = so_pru_accept(so, nam);
628 	return (error);
629 }
630 
631 int
632 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
633 {
634 	int error;
635 
636 	if (so->so_options & SO_ACCEPTCONN)
637 		return (EOPNOTSUPP);
638 	/*
639 	 * If protocol is connection-based, can only connect once.
640 	 * Otherwise, if connected, try to disconnect first.
641 	 * This allows user to disconnect by connecting to, e.g.,
642 	 * a null address.
643 	 */
644 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
645 	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
646 	    (error = sodisconnect(so)))) {
647 		error = EISCONN;
648 	} else {
649 		/*
650 		 * Prevent accumulated error from previous connection
651 		 * from biting us.
652 		 */
653 		so->so_error = 0;
654 		error = so_pru_connect(so, nam, td);
655 	}
656 	return (error);
657 }
658 
659 int
660 soconnect2(struct socket *so1, struct socket *so2)
661 {
662 	int error;
663 
664 	error = so_pru_connect2(so1, so2);
665 	return (error);
666 }
667 
668 int
669 sodisconnect(struct socket *so)
670 {
671 	int error;
672 
673 	if ((so->so_state & SS_ISCONNECTED) == 0) {
674 		error = ENOTCONN;
675 		goto bad;
676 	}
677 	if (so->so_state & SS_ISDISCONNECTING) {
678 		error = EALREADY;
679 		goto bad;
680 	}
681 	error = so_pru_disconnect(so);
682 bad:
683 	return (error);
684 }
685 
686 #define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
687 /*
688  * Send on a socket.
689  * If send must go all at once and message is larger than
690  * send buffering, then hard error.
691  * Lock against other senders.
692  * If must go all at once and not enough room now, then
693  * inform user that this would block and do nothing.
694  * Otherwise, if nonblocking, send as much as possible.
695  * The data to be sent is described by "uio" if nonzero,
696  * otherwise by the mbuf chain "top" (which must be null
697  * if uio is not).  Data provided in mbuf chain must be small
698  * enough to send all at once.
699  *
700  * Returns nonzero on error, timeout or signal; callers
701  * must check for short counts if EINTR/ERESTART are returned.
702  * Data and control buffers are freed on return.
703  */
704 int
705 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
706 	struct mbuf *top, struct mbuf *control, int flags,
707 	struct thread *td)
708 {
709 	struct mbuf **mp;
710 	struct mbuf *m;
711 	size_t resid;
712 	int space, len;
713 	int clen = 0, error, dontroute, mlen;
714 	int atomic = sosendallatonce(so) || top;
715 	int pru_flags;
716 
717 	if (uio) {
718 		resid = uio->uio_resid;
719 	} else {
720 		resid = (size_t)top->m_pkthdr.len;
721 #ifdef INVARIANTS
722 		len = 0;
723 		for (m = top; m; m = m->m_next)
724 			len += m->m_len;
725 		KKASSERT(top->m_pkthdr.len == len);
726 #endif
727 	}
728 
729 	/*
730 	 * WARNING!  resid is unsigned, space and len are signed.  space
731 	 * 	     can wind up negative if the sockbuf is overcommitted.
732 	 *
733 	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
734 	 * type sockets since that's an error.
735 	 */
736 	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
737 		error = EINVAL;
738 		goto out;
739 	}
740 
741 	dontroute =
742 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
743 	    (so->so_proto->pr_flags & PR_ATOMIC);
744 	if (td->td_lwp != NULL)
745 		td->td_lwp->lwp_ru.ru_msgsnd++;
746 	if (control)
747 		clen = control->m_len;
748 #define	gotoerr(errcode)	{ error = errcode; goto release; }
749 
750 restart:
751 	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
752 	if (error)
753 		goto out;
754 
755 	do {
756 		if (so->so_state & SS_CANTSENDMORE)
757 			gotoerr(EPIPE);
758 		if (so->so_error) {
759 			error = so->so_error;
760 			so->so_error = 0;
761 			goto release;
762 		}
763 		if ((so->so_state & SS_ISCONNECTED) == 0) {
764 			/*
765 			 * `sendto' and `sendmsg' are allowed on a connection-
766 			 * based socket if it supports implied connect.
767 			 * Return ENOTCONN if not connected and no address is
768 			 * supplied.
769 			 */
770 			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
771 			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
772 				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
773 				    !(resid == 0 && clen != 0))
774 					gotoerr(ENOTCONN);
775 			} else if (addr == NULL)
776 			    gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
777 				   ENOTCONN : EDESTADDRREQ);
778 		}
779 		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
780 		    clen > so->so_snd.ssb_hiwat) {
781 			gotoerr(EMSGSIZE);
782 		}
783 		space = ssb_space(&so->so_snd);
784 		if (flags & MSG_OOB)
785 			space += 1024;
786 		if ((space < 0 || (size_t)space < resid + clen) && uio &&
787 		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
788 			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
789 				gotoerr(EWOULDBLOCK);
790 			ssb_unlock(&so->so_snd);
791 			error = ssb_wait(&so->so_snd);
792 			if (error)
793 				goto out;
794 			goto restart;
795 		}
796 		mp = &top;
797 		space -= clen;
798 		do {
799 		    if (uio == NULL) {
800 			/*
801 			 * Data is prepackaged in "top".
802 			 */
803 			resid = 0;
804 			if (flags & MSG_EOR)
805 				top->m_flags |= M_EOR;
806 		    } else do {
807 			if (resid > INT_MAX)
808 				resid = INT_MAX;
809 			m = m_getl((int)resid, MB_WAIT, MT_DATA,
810 				   top == NULL ? M_PKTHDR : 0, &mlen);
811 			if (top == NULL) {
812 				m->m_pkthdr.len = 0;
813 				m->m_pkthdr.rcvif = NULL;
814 			}
815 			len = imin((int)szmin(mlen, resid), space);
816 			if (resid < MINCLSIZE) {
817 				/*
818 				 * For datagram protocols, leave room
819 				 * for protocol headers in first mbuf.
820 				 */
821 				if (atomic && top == NULL && len < mlen)
822 					MH_ALIGN(m, len);
823 			}
824 			space -= len;
825 			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
826 			resid = uio->uio_resid;
827 			m->m_len = len;
828 			*mp = m;
829 			top->m_pkthdr.len += len;
830 			if (error)
831 				goto release;
832 			mp = &m->m_next;
833 			if (resid == 0) {
834 				if (flags & MSG_EOR)
835 					top->m_flags |= M_EOR;
836 				break;
837 			}
838 		    } while (space > 0 && atomic);
839 		    if (dontroute)
840 			    so->so_options |= SO_DONTROUTE;
841 		    if (flags & MSG_OOB) {
842 		    	    pru_flags = PRUS_OOB;
843 		    } else if ((flags & MSG_EOF) &&
844 		    	       (so->so_proto->pr_flags & PR_IMPLOPCL) &&
845 			       (resid == 0)) {
846 			    /*
847 			     * If the user set MSG_EOF, the protocol
848 			     * understands this flag and nothing left to
849 			     * send then use PRU_SEND_EOF instead of PRU_SEND.
850 			     */
851 		    	    pru_flags = PRUS_EOF;
852 		    } else if (resid > 0 && space > 0) {
853 			    /* If there is more to send, set PRUS_MORETOCOME */
854 		    	    pru_flags = PRUS_MORETOCOME;
855 		    } else {
856 		    	    pru_flags = 0;
857 		    }
858 		    /*
859 		     * XXX all the SS_CANTSENDMORE checks previously
860 		     * done could be out of date.  We could have received
861 		     * a reset packet in an interrupt or maybe we slept
862 		     * while doing page faults in uiomove() etc. We could
863 		     * probably recheck again inside the splnet() protection
864 		     * here, but there are probably other places that this
865 		     * also happens.  We must rethink this.
866 		     */
867 		    error = so_pru_send(so, pru_flags, top, addr, control, td);
868 		    if (dontroute)
869 			    so->so_options &= ~SO_DONTROUTE;
870 		    clen = 0;
871 		    control = NULL;
872 		    top = NULL;
873 		    mp = &top;
874 		    if (error)
875 			    goto release;
876 		} while (resid && space > 0);
877 	} while (resid);
878 
879 release:
880 	ssb_unlock(&so->so_snd);
881 out:
882 	if (top)
883 		m_freem(top);
884 	if (control)
885 		m_freem(control);
886 	return (error);
887 }
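/*
 * Usage sketch (illustrative, assuming the standard BSD uio layout
 * and a hypothetical buf/buflen): an in-kernel writer describes its
 * buffer with a uio and lets sosend() segment it into mbufs:
 *
 *	struct iovec iov;
 *	struct uio auio;
 *	int error;
 *
 *	iov.iov_base = buf;
 *	iov.iov_len = buflen;
 *	auio.uio_iov = &iov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = buflen;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_WRITE;
 *	auio.uio_td = td;
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 *
 * Per the header comment, a short transfer shows up as a non-zero
 * auio.uio_resid and must be checked when EINTR/ERESTART is returned.
 */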
888 
889 /*
890  * A specialization of sosend() for UDP based on protocol-specific knowledge:
891  *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
892  *	sosendallatonce() returns true,
893  *	the "atomic" variable is true,
894  *	and sosendudp() blocks until space is available for the entire send.
895  *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
896  *	PR_IMPLOPCL flags set.
897  *   UDP has no out-of-band data.
898  *   UDP has no control data.
899  *   UDP does not support MSG_EOR.
900  */
901 int
902 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
903 	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
904 {
905 	size_t resid;
906 	int error, pru_flags = 0;
907 	int space;
908 
909 	if (td->td_lwp != NULL)
910 		td->td_lwp->lwp_ru.ru_msgsnd++;
911 	if (control)
912 		m_freem(control);
913 
914 	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
915 	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;
916 
917 restart:
918 	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
919 	if (error)
920 		goto out;
921 
922 	if (so->so_state & SS_CANTSENDMORE)
923 		gotoerr(EPIPE);
924 	if (so->so_error) {
925 		error = so->so_error;
926 		so->so_error = 0;
927 		goto release;
928 	}
929 	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
930 		gotoerr(EDESTADDRREQ);
931 	if (resid > so->so_snd.ssb_hiwat)
932 		gotoerr(EMSGSIZE);
933 	space = ssb_space(&so->so_snd);
934 	if (uio && (space < 0 || (size_t)space < resid)) {
935 		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
936 			gotoerr(EWOULDBLOCK);
937 		ssb_unlock(&so->so_snd);
938 		error = ssb_wait(&so->so_snd);
939 		if (error)
940 			goto out;
941 		goto restart;
942 	}
943 
944 	if (uio) {
945 		int hdrlen = max_hdr;
946 
947 		/*
948 		 * We try to optimize out the additional mbuf
949 		 * allocations in M_PREPEND() on output path, e.g.
950 		 * - udp_output(), when it tries to prepend protocol
951 		 *   headers.
952 		 * - Link layer output function, when it tries to
953 		 *   prepend link layer header.
954 		 *
955 		 * This probably will not benefit any data that will
956 		 * be fragmented, so this optimization is only performed
957 		 * when the size of data and max size of protocol+link
958 		 * headers fit into one mbuf cluster.
959 		 */
960 		if (uio->uio_resid > MCLBYTES - hdrlen ||
961 		    !udp_sosend_prepend) {
962 			top = m_uiomove(uio);
963 			if (top == NULL)
964 				goto release;
965 		} else {
966 			int nsize;
967 
968 			top = m_getl(uio->uio_resid + hdrlen, MB_WAIT,
969 			    MT_DATA, M_PKTHDR, &nsize);
970 			KASSERT(nsize >= uio->uio_resid + hdrlen,
971 			    ("sosendudp invalid nsize %d, "
972 			     "resid %zu, hdrlen %d",
973 			     nsize, uio->uio_resid, hdrlen));
974 
975 			top->m_len = uio->uio_resid;
976 			top->m_pkthdr.len = uio->uio_resid;
977 			top->m_data += hdrlen;
978 
979 			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
980 			if (error)
981 				goto out;
982 		}
983 	}
984 
985 	if (flags & MSG_DONTROUTE)
986 		pru_flags |= PRUS_DONTROUTE;
987 
988 	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
989 		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
990 		error = 0;
991 	} else {
992 		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
993 	}
994 	top = NULL;		/* sent or freed in lower layer */
995 
996 release:
997 	ssb_unlock(&so->so_snd);
998 out:
999 	if (top)
1000 		m_freem(top);
1001 	return (error);
1002 }
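/*
 * Note on the prepend optimization above (numbers illustrative):
 * max_hdr is the boot-time worst case of protocol + link headers.
 * With MCLBYTES = 2048 and, say, max_hdr = 80, any datagram of up
 * to 1968 bytes is copied max_hdr bytes into a single cluster:
 *
 *	top->m_data += hdrlen;	leaves hdrlen bytes of leading space
 *
 * so the later M_PREPEND() in udp_output() and in the link layer
 * can prepend headers in place instead of allocating another mbuf.
 * Larger datagrams will be IP-fragmented anyway, so they take the
 * plain m_uiomove() path.
 */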
1003 
1004 int
1005 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
1006 	struct mbuf *top, struct mbuf *control, int flags,
1007 	struct thread *td)
1008 {
1009 	struct mbuf **mp;
1010 	struct mbuf *m;
1011 	size_t resid;
1012 	int space, len;
1013 	int error, mlen;
1014 	int allatonce;
1015 	int pru_flags;
1016 
1017 	if (uio) {
1018 		KKASSERT(top == NULL);
1019 		allatonce = 0;
1020 		resid = uio->uio_resid;
1021 	} else {
1022 		allatonce = 1;
1023 		resid = (size_t)top->m_pkthdr.len;
1024 #ifdef INVARIANTS
1025 		len = 0;
1026 		for (m = top; m; m = m->m_next)
1027 			len += m->m_len;
1028 		KKASSERT(top->m_pkthdr.len == len);
1029 #endif
1030 	}
1031 
1032 	/*
1033 	 * WARNING!  resid is unsigned, space and len are signed.  space
1034 	 * 	     can wind up negative if the sockbuf is overcommitted.
1035 	 *
1036 	 * Also check to make sure that MSG_EOR isn't used on TCP
1037 	 * Also check to make sure that MSG_EOR isn't used on TCP.
1038 	if (flags & MSG_EOR) {
1039 		error = EINVAL;
1040 		goto out;
1041 	}
1042 
1043 	if (control) {
1044 		/* TCP doesn't do control messages (rights, creds, etc) */
1045 		if (control->m_len) {
1046 			error = EINVAL;
1047 			goto out;
1048 		}
1049 		m_freem(control);	/* empty control, just free it */
1050 		control = NULL;
1051 	}
1052 
1053 	if (td->td_lwp != NULL)
1054 		td->td_lwp->lwp_ru.ru_msgsnd++;
1055 
1056 #define	gotoerr(errcode)	{ error = errcode; goto release; }
1057 
1058 restart:
1059 	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
1060 	if (error)
1061 		goto out;
1062 
1063 	do {
1064 		if (so->so_state & SS_CANTSENDMORE)
1065 			gotoerr(EPIPE);
1066 		if (so->so_error) {
1067 			error = so->so_error;
1068 			so->so_error = 0;
1069 			goto release;
1070 		}
1071 		if ((so->so_state & SS_ISCONNECTED) == 0 &&
1072 		    (so->so_state & SS_ISCONFIRMING) == 0)
1073 			gotoerr(ENOTCONN);
1074 		if (allatonce && resid > so->so_snd.ssb_hiwat)
1075 			gotoerr(EMSGSIZE);
1076 
1077 		space = ssb_space_prealloc(&so->so_snd);
1078 		if (flags & MSG_OOB)
1079 			space += 1024;
1080 		if ((space < 0 || (size_t)space < resid) && !allatonce &&
1081 		    space < so->so_snd.ssb_lowat) {
1082 			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
1083 				gotoerr(EWOULDBLOCK);
1084 			ssb_unlock(&so->so_snd);
1085 			error = ssb_wait(&so->so_snd);
1086 			if (error)
1087 				goto out;
1088 			goto restart;
1089 		}
1090 		mp = &top;
1091 		do {
1092 		    int cnt = 0, async = 0;
1093 
1094 		    if (uio == NULL) {
1095 			/*
1096 			 * Data is prepackaged in "top".
1097 			 */
1098 			resid = 0;
1099 		    } else do {
1100 			if (resid > INT_MAX)
1101 				resid = INT_MAX;
1102 			m = m_getl((int)resid, MB_WAIT, MT_DATA,
1103 				   top == NULL ? M_PKTHDR : 0, &mlen);
1104 			if (top == NULL) {
1105 				m->m_pkthdr.len = 0;
1106 				m->m_pkthdr.rcvif = NULL;
1107 			}
1108 			len = imin((int)szmin(mlen, resid), space);
1109 			space -= len;
1110 			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
1111 			resid = uio->uio_resid;
1112 			m->m_len = len;
1113 			*mp = m;
1114 			top->m_pkthdr.len += len;
1115 			if (error)
1116 				goto release;
1117 			mp = &m->m_next;
1118 			if (resid == 0)
1119 				break;
1120 			++cnt;
1121 		    } while (space > 0 && cnt < tcp_sosend_agglim);
1122 
1123 		    if (tcp_sosend_async)
1124 			    async = 1;
1125 
1126 		    if (flags & MSG_OOB) {
1127 		    	    pru_flags = PRUS_OOB;
1128 			    async = 0;
1129 		    } else if ((flags & MSG_EOF) && resid == 0) {
1130 			    pru_flags = PRUS_EOF;
1131 		    } else if (resid > 0 && space > 0) {
1132 			    /* If there is more to send, set PRUS_MORETOCOME */
1133 		    	    pru_flags = PRUS_MORETOCOME;
1134 			    async = 1;
1135 		    } else {
1136 		    	    pru_flags = 0;
1137 		    }
1138 
1139 		    if (flags & MSG_SYNC)
1140 			    async = 0;
1141 
1142 		    /*
1143 		     * XXX all the SS_CANTSENDMORE checks previously
1144 		     * done could be out of date.  We could have received
1145 		     * a reset packet in an interrupt or maybe we slept
1146 		     * while doing page faults in uiomove() etc. We could
1147 		     * probably recheck again inside the splnet() protection
1148 		     * here, but there are probably other places that this
1149 		     * also happens.  We must rethink this.
1150 		     */
1151 		    for (m = top; m; m = m->m_next)
1152 			    ssb_preallocstream(&so->so_snd, m);
1153 		    if (!async) {
1154 			    error = so_pru_send(so, pru_flags, top,
1155 			        NULL, NULL, td);
1156 		    } else {
1157 			    so_pru_send_async(so, pru_flags, top,
1158 			        NULL, NULL, td);
1159 			    error = 0;
1160 		    }
1161 
1162 		    top = NULL;
1163 		    mp = &top;
1164 		    if (error)
1165 			    goto release;
1166 		} while (resid && space > 0);
1167 	} while (resid);
1168 
1169 release:
1170 	ssb_unlock(&so->so_snd);
1171 out:
1172 	if (top)
1173 		m_freem(top);
1174 	if (control)
1175 		m_freem(control);
1176 	return (error);
1177 }
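/*
 * Illustrative note: a caller that must observe the protocol's
 * return status synchronously can pass MSG_SYNC, e.g.:
 *
 *	error = sosendtcp(so, NULL, &auio, NULL, NULL, MSG_SYNC, td);
 *
 * which forces the so_pru_send() branch above even when
 * tcp_sosend_async is enabled; otherwise intermediate chunks sent
 * with PRUS_MORETOCOME are handed to the protocol thread
 * asynchronously and always report success.
 */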
1178 
1179 /*
1180  * Implement receive operations on a socket.
1181  *
1182  * We depend on the way that records are added to the signalsockbuf
1183  * by sbappend*.  In particular, each record (mbufs linked through m_next)
1184  * must begin with an address if the protocol so specifies,
1185  * followed by an optional mbuf or mbufs containing ancillary data,
1186  * and then zero or more mbufs of data.
1187  *
1188  * Although the signalsockbuf is locked, new data may still be appended.
1189  * A token inside the ssb_lock deals with MP issues and still allows
1190  * the network to access the socket if we block in a uio.
1191  *
1192 	 * The caller may receive the data as a single mbuf chain by supplying
1193 	 * a struct sockbuf (sio) in which to return the chain.  In that case
1194 	 * the byte limit is taken from sio->sb_climit rather than from a uio.
1195  */
1196 int
1197 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
1198 	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
1199 {
1200 	struct mbuf *m, *n;
1201 	struct mbuf *free_chain = NULL;
1202 	int flags, len, error, offset;
1203 	struct protosw *pr = so->so_proto;
1204 	int moff, type = 0;
1205 	size_t resid, orig_resid;
1206 
1207 	if (uio)
1208 		resid = uio->uio_resid;
1209 	else
1210 		resid = (size_t)(sio->sb_climit - sio->sb_cc);
1211 	orig_resid = resid;
1212 
1213 	if (psa)
1214 		*psa = NULL;
1215 	if (controlp)
1216 		*controlp = NULL;
1217 	if (flagsp)
1218 		flags = *flagsp &~ MSG_EOR;
1219 	else
1220 		flags = 0;
1221 	if (flags & MSG_OOB) {
1222 		m = m_get(MB_WAIT, MT_DATA);
1223 		if (m == NULL)
1224 			return (ENOBUFS);
1225 		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
1226 		if (error)
1227 			goto bad;
1228 		if (sio) {
1229 			do {
1230 				sbappend(sio, m);
1231 				KKASSERT(resid >= (size_t)m->m_len);
1232 				resid -= (size_t)m->m_len;
1233 			} while (resid > 0 && m);
1234 		} else {
1235 			do {
1236 				uio->uio_resid = resid;
1237 				error = uiomove(mtod(m, caddr_t),
1238 						(int)szmin(resid, m->m_len),
1239 						uio);
1240 				resid = uio->uio_resid;
1241 				m = m_free(m);
1242 			} while (uio->uio_resid && error == 0 && m);
1243 		}
1244 bad:
1245 		if (m)
1246 			m_freem(m);
1247 		return (error);
1248 	}
1249 	if ((so->so_state & SS_ISCONFIRMING) && resid)
1250 		so_pru_rcvd(so, 0);
1251 
1252 	/*
1253 	 * The token interlocks against the protocol thread while
1254 	 * ssb_lock is a blocking lock against other userland entities.
1255 	 */
1256 	lwkt_gettoken(&so->so_rcv.ssb_token);
1257 restart:
1258 	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
1259 	if (error)
1260 		goto done;
1261 
1262 	m = so->so_rcv.ssb_mb;
1263 	/*
1264 	 * If we have less data than requested, block awaiting more
1265 	 * (subject to any timeout) if:
1266 	 *   1. the current count is less than the low water mark, or
1267 	 *   2. MSG_WAITALL is set, and it is possible to do the entire
1268 	 *	receive operation at once if we block (resid <= hiwat), and
1269 	 *   3. MSG_DONTWAIT is not set.
1270 	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1271 	 * we have to do the receive in sections, and thus risk returning
1272 	 * a short count if a timeout or signal occurs after we start.
1273 	 */
1274 	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1275 	    (size_t)so->so_rcv.ssb_cc < resid) &&
1276 	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
1277 	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
1278 	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
1279 		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
1280 		if (so->so_error) {
1281 			if (m)
1282 				goto dontblock;
1283 			error = so->so_error;
1284 			if ((flags & MSG_PEEK) == 0)
1285 				so->so_error = 0;
1286 			goto release;
1287 		}
1288 		if (so->so_state & SS_CANTRCVMORE) {
1289 			if (m)
1290 				goto dontblock;
1291 			else
1292 				goto release;
1293 		}
1294 		for (; m; m = m->m_next) {
1295 			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
1296 				m = so->so_rcv.ssb_mb;
1297 				goto dontblock;
1298 			}
1299 		}
1300 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1301 		    (pr->pr_flags & PR_CONNREQUIRED)) {
1302 			error = ENOTCONN;
1303 			goto release;
1304 		}
1305 		if (resid == 0)
1306 			goto release;
1307 		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
1308 			error = EWOULDBLOCK;
1309 			goto release;
1310 		}
1311 		ssb_unlock(&so->so_rcv);
1312 		error = ssb_wait(&so->so_rcv);
1313 		if (error)
1314 			goto done;
1315 		goto restart;
1316 	}
1317 dontblock:
1318 	if (uio && uio->uio_td && uio->uio_td->td_proc)
1319 		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
1320 
1321 	/*
1322 	 * note: m should be == sb_mb here.  Cache the next record while
1323 	 * cleaning up.  Note that calling m_free*() will break out of the
1324 	 * critical section.
1325 	 */
1326 	KKASSERT(m == so->so_rcv.ssb_mb);
1327 
1328 	/*
1329 	 * Skip any address mbufs prepending the record.
1330 	 */
1331 	if (pr->pr_flags & PR_ADDR) {
1332 		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
1333 		orig_resid = 0;
1334 		if (psa)
1335 			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
1336 		if (flags & MSG_PEEK)
1337 			m = m->m_next;
1338 		else
1339 			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1340 	}
1341 
1342 	/*
1343 	 * Skip any control mbufs prepending the record.
1344 	 */
1345 #ifdef SCTP
1346 	if (pr->pr_flags & PR_ADDR_OPT) {
1347 		/*
1348 		 * For SCTP we may be getting a
1349 		 * whole message OR a partial delivery.
1350 		 */
1351 		if (m && m->m_type == MT_SONAME) {
1352 			orig_resid = 0;
1353 			if (psa)
1354 				*psa = dup_sockaddr(mtod(m, struct sockaddr *));
1355 			if (flags & MSG_PEEK)
1356 				m = m->m_next;
1357 			else
1358 				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1359 		}
1360 	}
1361 #endif /* SCTP */
1362 	while (m && m->m_type == MT_CONTROL && error == 0) {
1363 		if (flags & MSG_PEEK) {
1364 			if (controlp)
1365 				*controlp = m_copy(m, 0, m->m_len);
1366 			m = m->m_next;	/* XXX race */
1367 		} else {
1368 			if (controlp) {
1369 				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1370 				if (pr->pr_domain->dom_externalize &&
1371 				    mtod(m, struct cmsghdr *)->cmsg_type ==
1372 				    SCM_RIGHTS)
1373 				   error = (*pr->pr_domain->dom_externalize)(m);
1374 				*controlp = m;
1375 				m = n;
1376 			} else {
1377 				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1378 			}
1379 		}
1380 		if (controlp && *controlp) {
1381 			orig_resid = 0;
1382 			controlp = &(*controlp)->m_next;
1383 		}
1384 	}
1385 
1386 	/*
1387 	 * flag OOB data.
1388 	 */
1389 	if (m) {
1390 		type = m->m_type;
1391 		if (type == MT_OOBDATA)
1392 			flags |= MSG_OOB;
1393 	}
1394 
1395 	/*
1396 	 * Copy to the UIO or the mbuf return chain (sio).
1397 	 */
1398 	moff = 0;
1399 	offset = 0;
1400 	while (m && resid > 0 && error == 0) {
1401 		if (m->m_type == MT_OOBDATA) {
1402 			if (type != MT_OOBDATA)
1403 				break;
1404 		} else if (type == MT_OOBDATA)
1405 			break;
1406 		else
1407 		    KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1408 			("receive 3"));
1409 		soclrstate(so, SS_RCVATMARK);
1410 		len = (resid > INT_MAX) ? INT_MAX : resid;
1411 		if (so->so_oobmark && len > so->so_oobmark - offset)
1412 			len = so->so_oobmark - offset;
1413 		if (len > m->m_len - moff)
1414 			len = m->m_len - moff;
1415 
1416 		/*
1417 		 * Copy out to the UIO or pass the mbufs back to the SIO.
1418 		 * The SIO is dealt with when we eat the mbuf, but deal
1419 		 * with the resid here either way.
1420 		 */
1421 		if (uio) {
1422 			uio->uio_resid = resid;
1423 			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
1424 			resid = uio->uio_resid;
1425 			if (error)
1426 				goto release;
1427 		} else {
1428 			resid -= (size_t)len;
1429 		}
1430 
1431 		/*
1432 		 * Eat the entire mbuf or just a piece of it
1433 		 */
1434 		if (len == m->m_len - moff) {
1435 			if (m->m_flags & M_EOR)
1436 				flags |= MSG_EOR;
1437 #ifdef SCTP
1438 			if (m->m_flags & M_NOTIFICATION)
1439 				flags |= MSG_NOTIFICATION;
1440 #endif /* SCTP */
1441 			if (flags & MSG_PEEK) {
1442 				m = m->m_next;
1443 				moff = 0;
1444 			} else {
1445 				if (sio) {
1446 					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1447 					sbappend(sio, m);
1448 					m = n;
1449 				} else {
1450 					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1451 				}
1452 			}
1453 		} else {
1454 			if (flags & MSG_PEEK) {
1455 				moff += len;
1456 			} else {
1457 				if (sio) {
1458 					n = m_copym(m, 0, len, MB_WAIT);
1459 					if (n)
1460 						sbappend(sio, n);
1461 				}
1462 				m->m_data += len;
1463 				m->m_len -= len;
1464 				so->so_rcv.ssb_cc -= len;
1465 			}
1466 		}
1467 		if (so->so_oobmark) {
1468 			if ((flags & MSG_PEEK) == 0) {
1469 				so->so_oobmark -= len;
1470 				if (so->so_oobmark == 0) {
1471 					sosetstate(so, SS_RCVATMARK);
1472 					break;
1473 				}
1474 			} else {
1475 				offset += len;
1476 				if (offset == so->so_oobmark)
1477 					break;
1478 			}
1479 		}
1480 		if (flags & MSG_EOR)
1481 			break;
1482 		/*
1483 		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
1484 		 * we must not quit until resid == 0 or an error
1485 		 * terminates the transfer.  If a signal/timeout occurs, return
1486 		 * with a short count but without error.
1487 		 * Keep signalsockbuf locked against other readers.
1488 		 */
1489 		while ((flags & MSG_WAITALL) && m == NULL &&
1490 		       resid > 0 && !sosendallatonce(so) &&
1491 		       so->so_rcv.ssb_mb == NULL) {
1492 			if (so->so_error || so->so_state & SS_CANTRCVMORE)
1493 				break;
1494 			/*
1495 			 * The window might have closed to zero, make
1496 			 * sure we send an ack now that we've drained
1497 			 * the buffer or we might end up blocking until
1498 			 * the idle timer takes over (5 seconds).
1499 			 */
1500 			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1501 				so_pru_rcvd(so, flags);
1502 			error = ssb_wait(&so->so_rcv);
1503 			if (error) {
1504 				ssb_unlock(&so->so_rcv);
1505 				error = 0;
1506 				goto done;
1507 			}
1508 			m = so->so_rcv.ssb_mb;
1509 		}
1510 	}
1511 
1512 	/*
1513 	 * If an atomic read was requested but unread data still remains
1514 	 * in the record, set MSG_TRUNC.
1515 	 */
1516 	if (m && pr->pr_flags & PR_ATOMIC)
1517 		flags |= MSG_TRUNC;
1518 
1519 	/*
1520 	 * Cleanup.  If an atomic read was requested drop any unread data.
1521 	 */
1522 	if ((flags & MSG_PEEK) == 0) {
1523 		if (m && (pr->pr_flags & PR_ATOMIC))
1524 			sbdroprecord(&so->so_rcv.sb);
1525 		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
1526 			so_pru_rcvd(so, flags);
1527 	}
1528 
1529 	if (orig_resid == resid && orig_resid &&
1530 	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1531 		ssb_unlock(&so->so_rcv);
1532 		goto restart;
1533 	}
1534 
1535 	if (flagsp)
1536 		*flagsp |= flags;
1537 release:
1538 	ssb_unlock(&so->so_rcv);
1539 done:
1540 	lwkt_reltoken(&so->so_rcv.ssb_token);
1541 	if (free_chain)
1542 		m_freem(free_chain);
1543 	return (error);
1544 }
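/*
 * Usage sketch (illustrative): the mbuf-chain return mode used by
 * in-kernel consumers supplies a sockbuf instead of a uio; sb_climit
 * bounds the receive as described in the header comment:
 *
 *	struct sockbuf sio;
 *	int flags = MSG_DONTWAIT;
 *	int error;
 *
 *	... initialize sio with sb_climit = maxbytes, sb_cc = 0 ...
 *	error = soreceive(so, NULL, NULL, &sio, NULL, &flags);
 *	... on success sio.sb_mb holds the received mbuf chain ...
 *
 * The uio-based mode is the copyout path taken for userland read(2)
 * and recvmsg(2).
 */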
1545 
1546 int
1547 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
1548 	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
1549 {
1550 	struct mbuf *m, *n;
1551 	struct mbuf *free_chain = NULL;
1552 	int flags, len, error, offset;
1553 	struct protosw *pr = so->so_proto;
1554 	int moff;
1555 	size_t resid, orig_resid;
1556 
1557 	if (uio)
1558 		resid = uio->uio_resid;
1559 	else
1560 		resid = (size_t)(sio->sb_climit - sio->sb_cc);
1561 	orig_resid = resid;
1562 
1563 	if (psa)
1564 		*psa = NULL;
1565 	if (controlp)
1566 		*controlp = NULL;
1567 	if (flagsp)
1568 		flags = *flagsp &~ MSG_EOR;
1569 	else
1570 		flags = 0;
1571 	if (flags & MSG_OOB) {
1572 		m = m_get(MB_WAIT, MT_DATA);
1573 		if (m == NULL)
1574 			return (ENOBUFS);
1575 		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
1576 		if (error)
1577 			goto bad;
1578 		if (sio) {
1579 			do {
1580 				sbappend(sio, m);
1581 				KKASSERT(resid >= (size_t)m->m_len);
1582 				resid -= (size_t)m->m_len;
1583 			} while (resid > 0 && m);
1584 		} else {
1585 			do {
1586 				uio->uio_resid = resid;
1587 				error = uiomove(mtod(m, caddr_t),
1588 						(int)szmin(resid, m->m_len),
1589 						uio);
1590 				resid = uio->uio_resid;
1591 				m = m_free(m);
1592 			} while (uio->uio_resid && error == 0 && m);
1593 		}
1594 bad:
1595 		if (m)
1596 			m_freem(m);
1597 		return (error);
1598 	}
1599 
1600 	/*
1601 	 * The token interlocks against the protocol thread while
1602 	 * ssb_lock is a blocking lock against other userland entities.
1603 	 */
1604 	lwkt_gettoken(&so->so_rcv.ssb_token);
1605 restart:
1606 	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
1607 	if (error)
1608 		goto done;
1609 
1610 	m = so->so_rcv.ssb_mb;
1611 	/*
1612 	 * If we have less data than requested, block awaiting more
1613 	 * (subject to any timeout) if:
1614 	 *   1. the current count is less than the low water mark, or
1615 	 *   2. MSG_WAITALL is set, and it is possible to do the entire
1616 	 *	receive operation at once if we block (resid <= hiwat), and
1617 	 *   3. MSG_DONTWAIT is not set.
1618 	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1619 	 * we have to do the receive in sections, and thus risk returning
1620 	 * a short count if a timeout or signal occurs after we start.
1621 	 */
1622 	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1623 	    (size_t)so->so_rcv.ssb_cc < resid) &&
1624 	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
1625 	   ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
1626 		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
1627 		if (so->so_error) {
1628 			if (m)
1629 				goto dontblock;
1630 			error = so->so_error;
1631 			if ((flags & MSG_PEEK) == 0)
1632 				so->so_error = 0;
1633 			goto release;
1634 		}
1635 		if (so->so_state & SS_CANTRCVMORE) {
1636 			if (m)
1637 				goto dontblock;
1638 			else
1639 				goto release;
1640 		}
1641 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1642 		    (pr->pr_flags & PR_CONNREQUIRED)) {
1643 			error = ENOTCONN;
1644 			goto release;
1645 		}
1646 		if (resid == 0)
1647 			goto release;
1648 		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
1649 			error = EWOULDBLOCK;
1650 			goto release;
1651 		}
1652 		ssb_unlock(&so->so_rcv);
1653 		error = ssb_wait(&so->so_rcv);
1654 		if (error)
1655 			goto done;
1656 		goto restart;
1657 	}
1658 dontblock:
1659 	if (uio && uio->uio_td && uio->uio_td->td_proc)
1660 		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
1661 
1662 	/*
1663 	 * note: m should be == sb_mb here.  Cache the next record while
1664 	 * cleaning up.  Note that calling m_free*() will break out of the
1665 	 * critical section.
1666 	 */
1667 	KKASSERT(m == so->so_rcv.ssb_mb);
1668 
1669 	/*
1670 	 * Copy to the UIO or the mbuf return chain (sio).
1671 	 */
1672 	moff = 0;
1673 	offset = 0;
1674 	while (m && resid > 0 && error == 0) {
1675 		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1676 		    ("receive 3"));
1677 
1678 		soclrstate(so, SS_RCVATMARK);
1679 		len = (resid > INT_MAX) ? INT_MAX : resid;
1680 		if (so->so_oobmark && len > so->so_oobmark - offset)
1681 			len = so->so_oobmark - offset;
1682 		if (len > m->m_len - moff)
1683 			len = m->m_len - moff;
1684 
1685 		/*
1686 		 * Copy out to the UIO or pass the mbufs back to the SIO.
1687 		 * The SIO is dealt with when we eat the mbuf, but deal
1688 		 * with the resid here either way.
1689 		 */
1690 		if (uio) {
1691 			uio->uio_resid = resid;
1692 			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
1693 			resid = uio->uio_resid;
1694 			if (error)
1695 				goto release;
1696 		} else {
1697 			resid -= (size_t)len;
1698 		}
1699 
1700 		/*
1701 		 * Eat the entire mbuf or just a piece of it
1702 		 */
1703 		if (len == m->m_len - moff) {
1704 			if (flags & MSG_PEEK) {
1705 				m = m->m_next;
1706 				moff = 0;
1707 			} else {
1708 				if (sio) {
1709 					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1710 					sbappend(sio, m);
1711 					m = n;
1712 				} else {
1713 					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1714 				}
1715 			}
1716 		} else {
1717 			if (flags & MSG_PEEK) {
1718 				moff += len;
1719 			} else {
1720 				if (sio) {
1721 					n = m_copym(m, 0, len, MB_WAIT);
1722 					if (n)
1723 						sbappend(sio, n);
1724 				}
1725 				m->m_data += len;
1726 				m->m_len -= len;
1727 				so->so_rcv.ssb_cc -= len;
1728 			}
1729 		}
1730 		if (so->so_oobmark) {
1731 			if ((flags & MSG_PEEK) == 0) {
1732 				so->so_oobmark -= len;
1733 				if (so->so_oobmark == 0) {
1734 					sosetstate(so, SS_RCVATMARK);
1735 					break;
1736 				}
1737 			} else {
1738 				offset += len;
1739 				if (offset == so->so_oobmark)
1740 					break;
1741 			}
1742 		}
1743 		/*
1744 		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
1745 		 * we must not quit until resid == 0 or an error
1746 		 * terminates the transfer.  If a signal/timeout occurs, return
1747 		 * with a short count but without error.
1748 		 * Keep signalsockbuf locked against other readers.
1749 		 */
1750 		while ((flags & MSG_WAITALL) && m == NULL &&
1751 		       resid > 0 && !sosendallatonce(so) &&
1752 		       so->so_rcv.ssb_mb == NULL) {
1753 			if (so->so_error || so->so_state & SS_CANTRCVMORE)
1754 				break;
1755 			/*
1756 			 * The window might have closed to zero, make
1757 			 * sure we send an ack now that we've drained
1758 			 * the buffer or we might end up blocking until
1759 			 * the idle timer takes over (5 seconds).
1760 			 */
1761 			if (so->so_pcb)
1762 				so_pru_rcvd_async(so);
1763 			error = ssb_wait(&so->so_rcv);
1764 			if (error) {
1765 				ssb_unlock(&so->so_rcv);
1766 				error = 0;
1767 				goto done;
1768 			}
1769 			m = so->so_rcv.ssb_mb;
1770 		}
1771 	}
1772 
1773 	/*
1774 	 * Cleanup.  If an atomic read was requested drop any unread data.
1775 	 */
1776 	if ((flags & MSG_PEEK) == 0) {
1777 		if (so->so_pcb)
1778 			so_pru_rcvd_async(so);
1779 	}
1780 
1781 	if (orig_resid == resid && orig_resid &&
1782 	    (so->so_state & SS_CANTRCVMORE) == 0) {
1783 		ssb_unlock(&so->so_rcv);
1784 		goto restart;
1785 	}
1786 
1787 	if (flagsp)
1788 		*flagsp |= flags;
1789 release:
1790 	ssb_unlock(&so->so_rcv);
1791 done:
1792 	lwkt_reltoken(&so->so_rcv.ssb_token);
1793 	if (free_chain)
1794 		m_freem(free_chain);
1795 	return (error);
1796 }
1797 
1798 /*
1799  * Shut a socket down.  Note that we do not get a frontend lock as we
1800  * want to be able to shut the socket down even if another thread is
1801  * blocked in a read(), thus waking it up.
1802  */
1803 int
1804 soshutdown(struct socket *so, int how)
1805 {
1806 	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1807 		return (EINVAL);
1808 
1809 	if (how != SHUT_WR) {
1810 		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
1811 		sorflush(so);
1812 		/*ssb_unlock(&so->so_rcv);*/
1813 	}
1814 	if (how != SHUT_RD)
1815 		return (so_pru_shutdown(so));
1816 	return (0);
1817 }
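/*
 * Mapping to shutdown(2)'s how argument, in sketch form:
 *
 *	soshutdown(so, SHUT_RD);	sorflush(): drop buffered data,
 *					set SS_CANTRCVMORE
 *	soshutdown(so, SHUT_WR);	so_pru_shutdown(): e.g. TCP
 *					sends FIN
 *	soshutdown(so, SHUT_RDWR);	both of the above
 *
 * Only the write half involves the protocol; the read half is
 * handled locally, which is why no frontend lock is needed.
 */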
1818 
1819 void
1820 sorflush(struct socket *so)
1821 {
1822 	struct signalsockbuf *ssb = &so->so_rcv;
1823 	struct protosw *pr = so->so_proto;
1824 	struct signalsockbuf asb;
1825 
1826 	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);
1827 
1828 	lwkt_gettoken(&ssb->ssb_token);
1829 	socantrcvmore(so);
1830 	asb = *ssb;
1831 
1832 	/*
1833 	 * Can't just blow up the ssb structure here
1834 	 */
1835 	bzero(&ssb->sb, sizeof(ssb->sb));
1836 	ssb->ssb_timeo = 0;
1837 	ssb->ssb_lowat = 0;
1838 	ssb->ssb_hiwat = 0;
1839 	ssb->ssb_mbmax = 0;
1840 	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);
1841 
1842 	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
1843 		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
1844 	ssb_release(&asb, so);
1845 
1846 	lwkt_reltoken(&ssb->ssb_token);
1847 }
1848 
1849 #ifdef INET
1850 static int
1851 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
1852 {
1853 	struct accept_filter_arg	*afap = NULL;
1854 	struct accept_filter	*afp;
1855 	struct so_accf	*af = so->so_accf;
1856 	int	error = 0;
1857 
1858 	/* do not set/remove accept filters on non-listening sockets */
1859 	if ((so->so_options & SO_ACCEPTCONN) == 0) {
1860 		error = EINVAL;
1861 		goto out;
1862 	}
1863 
1864 	/* removing the filter */
1865 	if (sopt == NULL) {
1866 		if (af != NULL) {
1867 			if (af->so_accept_filter != NULL &&
1868 				af->so_accept_filter->accf_destroy != NULL) {
1869 				af->so_accept_filter->accf_destroy(so);
1870 			}
1871 			if (af->so_accept_filter_str != NULL) {
1872 				kfree(af->so_accept_filter_str, M_ACCF);
1873 			}
1874 			kfree(af, M_ACCF);
1875 			so->so_accf = NULL;
1876 		}
1877 		so->so_options &= ~SO_ACCEPTFILTER;
1878 		return (0);
1879 	}
1880 	/* adding a filter */
1881 	/* must remove previous filter first */
1882 	if (af != NULL) {
1883 		error = EINVAL;
1884 		goto out;
1885 	}
1886 	/* don't put large objects on the kernel stack */
1887 	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
1888 	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1889 	afap->af_name[sizeof(afap->af_name)-1] = '\0';
1890 	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1891 	if (error)
1892 		goto out;
1893 	afp = accept_filt_get(afap->af_name);
1894 	if (afp == NULL) {
1895 		error = ENOENT;
1896 		goto out;
1897 	}
1898 	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
1899 	if (afp->accf_create != NULL) {
1900 		if (afap->af_name[0] != '\0') {
1901 			int len = strlen(afap->af_name) + 1;
1902 
1903 			af->so_accept_filter_str = kmalloc(len, M_ACCF,
1904 							   M_WAITOK);
1905 			strcpy(af->so_accept_filter_str, afap->af_name);
1906 		}
1907 		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
1908 		if (af->so_accept_filter_arg == NULL) {
1909 			kfree(af->so_accept_filter_str, M_ACCF);
1910 			kfree(af, M_ACCF);
1911 			so->so_accf = NULL;
1912 			error = EINVAL;
1913 			goto out;
1914 		}
1915 	}
1916 	af->so_accept_filter = afp;
1917 	so->so_accf = af;
1918 	so->so_options |= SO_ACCEPTFILTER;
1919 out:
1920 	if (afap != NULL)
1921 		kfree(afap, M_TEMP);
1922 	return (error);
1923 }
1924 #endif /* INET */
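
/*
 * Illustrative userland sketch (assumes the accf_data(9) accept filter
 * module is present): arm a listening socket so accept(2) only returns
 * connections that already have data ready to read.
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * Per the checks above, the socket must already be listening and any
 * previously installed filter must be removed first.
 */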
1925 
1926 /*
1927  * Perhaps this routine, and sooptcopyout(), below, ought to come in
1928  * an additional variant to handle the case where the option value needs
1929  * to be some kind of integer, but not a specific size.
1930  * In addition to their use here, these functions are also called by the
1931  * protocol-level pr_ctloutput() routines.
1932  */
1933 int
1934 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1935 {
1936 	return soopt_to_kbuf(sopt, buf, len, minlen);
1937 }
1938 
1939 int
1940 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1941 {
1942 	size_t	valsize;
1943 
1944 	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
1945 	KKASSERT(kva_p(buf));
1946 
1947 	/*
1948 	 * If the user gives us more than we wanted, we ignore it,
1949 	 * but if we don't get the minimum length the caller
1950 	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
1951 	 * is set to however much we actually retrieved.
1952 	 */
1953 	if ((valsize = sopt->sopt_valsize) < minlen)
1954 		return EINVAL;
1955 	if (valsize > len)
1956 		sopt->sopt_valsize = valsize = len;
1957 
1958 	bcopy(sopt->sopt_val, buf, valsize);
1959 	return 0;
1960 }
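
/*
 * Minimal sketch of the intended calling pattern (hypothetical
 * protocol ctloutput handler): copy in a fixed-size integer option,
 * letting the minlen check above reject short user buffers.
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	... apply optval ...
 */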
1961 
1962 
1963 int
1964 sosetopt(struct socket *so, struct sockopt *sopt)
1965 {
1966 	int	error, optval;
1967 	struct	linger l;
1968 	struct	timeval tv;
1969 	u_long  val;
1970 	struct signalsockbuf *sotmp;
1971 
1972 	error = 0;
1973 	sopt->sopt_dir = SOPT_SET;
1974 	if (sopt->sopt_level != SOL_SOCKET) {
1975 		if (so->so_proto && so->so_proto->pr_ctloutput) {
1976 			return (so_pr_ctloutput(so, sopt));
1977 		}
1978 		error = ENOPROTOOPT;
1979 	} else {
1980 		switch (sopt->sopt_name) {
1981 #ifdef INET
1982 		case SO_ACCEPTFILTER:
1983 			error = do_setopt_accept_filter(so, sopt);
1984 			if (error)
1985 				goto bad;
1986 			break;
1987 #endif /* INET */
1988 		case SO_LINGER:
1989 			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1990 			if (error)
1991 				goto bad;
1992 
1993 			so->so_linger = l.l_linger;
1994 			if (l.l_onoff)
1995 				so->so_options |= SO_LINGER;
1996 			else
1997 				so->so_options &= ~SO_LINGER;
1998 			break;
1999 
2000 		case SO_DEBUG:
2001 		case SO_KEEPALIVE:
2002 		case SO_DONTROUTE:
2003 		case SO_USELOOPBACK:
2004 		case SO_BROADCAST:
2005 		case SO_REUSEADDR:
2006 		case SO_REUSEPORT:
2007 		case SO_OOBINLINE:
2008 		case SO_TIMESTAMP:
2009 		case SO_NOSIGPIPE:
2010 			error = sooptcopyin(sopt, &optval, sizeof optval,
2011 					    sizeof optval);
2012 			if (error)
2013 				goto bad;
2014 			if (optval)
2015 				so->so_options |= sopt->sopt_name;
2016 			else
2017 				so->so_options &= ~sopt->sopt_name;
2018 			break;
2019 
2020 		case SO_SNDBUF:
2021 		case SO_RCVBUF:
2022 		case SO_SNDLOWAT:
2023 		case SO_RCVLOWAT:
2024 			error = sooptcopyin(sopt, &optval, sizeof optval,
2025 					    sizeof optval);
2026 			if (error)
2027 				goto bad;
2028 
2029 			/*
2030 			 * Values < 1 make no sense for any of these
2031 			 * options, so disallow them.
2032 			 */
2033 			if (optval < 1) {
2034 				error = EINVAL;
2035 				goto bad;
2036 			}
2037 
2038 			switch (sopt->sopt_name) {
2039 			case SO_SNDBUF:
2040 			case SO_RCVBUF:
2041 				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
2042 				    &so->so_snd : &so->so_rcv, (u_long)optval,
2043 				    so,
2044 				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
2045 					error = ENOBUFS;
2046 					goto bad;
2047 				}
2048 				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
2049 						&so->so_snd : &so->so_rcv;
2050 				atomic_clear_int(&sotmp->ssb_flags,
2051 						 SSB_AUTOSIZE);
2052 				break;
2053 
2054 			/*
2055 			 * Make sure the low-water is never greater than
2056 			 * the high-water.
2057 			 */
2058 			case SO_SNDLOWAT:
2059 				so->so_snd.ssb_lowat =
2060 				    (optval > so->so_snd.ssb_hiwat) ?
2061 				    so->so_snd.ssb_hiwat : optval;
2062 				atomic_clear_int(&so->so_snd.ssb_flags,
2063 						 SSB_AUTOLOWAT);
2064 				break;
2065 			case SO_RCVLOWAT:
2066 				so->so_rcv.ssb_lowat =
2067 				    (optval > so->so_rcv.ssb_hiwat) ?
2068 				    so->so_rcv.ssb_hiwat : optval;
2069 				atomic_clear_int(&so->so_rcv.ssb_flags,
2070 						 SSB_AUTOLOWAT);
2071 				break;
2072 			}
2073 			break;
2074 
2075 		case SO_SNDTIMEO:
2076 		case SO_RCVTIMEO:
2077 			error = sooptcopyin(sopt, &tv, sizeof tv,
2078 					    sizeof tv);
2079 			if (error)
2080 				goto bad;
2081 
2082 			/* assert(hz > 0); */
2083 			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2084 			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2085 				error = EDOM;
2086 				goto bad;
2087 			}
2088 			/* assert(tick > 0); */
2089 			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
2090 			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
2091 			if (val > INT_MAX) {
2092 				error = EDOM;
2093 				goto bad;
2094 			}
2095 			if (val == 0 && tv.tv_usec != 0)
2096 				val = 1;
2097 
2098 			switch (sopt->sopt_name) {
2099 			case SO_SNDTIMEO:
2100 				so->so_snd.ssb_timeo = val;
2101 				break;
2102 			case SO_RCVTIMEO:
2103 				so->so_rcv.ssb_timeo = val;
2104 				break;
2105 			}
2106 			break;
2107 		default:
2108 			error = ENOPROTOOPT;
2109 			break;
2110 		}
2111 		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
2112 			(void) so_pr_ctloutput(so, sopt);
2113 		}
2114 	}
2115 bad:
2116 	return (error);
2117 }
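
/*
 * Worked example of the SO_SNDTIMEO/SO_RCVTIMEO conversion above,
 * assuming hz = 100 (ustick = 1000000 / hz = 10000 us per tick):
 *
 *	tv = { 2, 500000 }		- a 2.5 second timeout
 *	val = 2 * 100 + 500000 / 10000	- 250 ticks
 *
 * A non-zero timeout that would round down to 0 ticks is bumped to 1
 * tick so it cannot be confused with "no timeout".
 */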
2118 
2119 /* Helper routine for getsockopt */
2120 int
2121 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2122 {
2123 	soopt_from_kbuf(sopt, buf, len);
2124 	return 0;
2125 }
2126 
2127 void
2128 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
2129 {
2130 	size_t	valsize;
2131 
2132 	if (len == 0) {
2133 		sopt->sopt_valsize = 0;
2134 		return;
2135 	}
2136 
2137 	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
2138 	KKASSERT(kva_p(buf));
2139 
2140 	/*
2141 	 * Documented get behavior is that we always return a value,
2142 	 * possibly truncated to fit in the user's buffer.
2143 	 * Traditional behavior is that we always tell the user
2144 	 * precisely how much we copied, rather than something useful
2145 	 * like the total amount we had available for her.
2146 	 * Note that this interface is not idempotent; the entire answer must
2147 	 * be generated ahead of time.
2148 	 */
2149 	valsize = szmin(len, sopt->sopt_valsize);
2150 	sopt->sopt_valsize = valsize;
2151 	if (sopt->sopt_val != NULL) {
2152 		bcopy(buf, sopt->sopt_val, valsize);
2153 	}
2154 }
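
/*
 * Sketch of the user-visible consequence (hypothetical sizes): a
 * getsockopt(2) caller that supplies a buffer smaller than the full
 * answer gets a silently truncated value, and the returned length
 * reports only what was copied.
 *
 *	int optval;
 *	socklen_t optlen = 2;		- deliberately undersized
 *
 *	getsockopt(s, SOL_SOCKET, SO_TYPE, &optval, &optlen);
 *	- only 2 bytes were copied out; optlen now reads back as 2
 */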
2155 
2156 int
2157 sogetopt(struct socket *so, struct sockopt *sopt)
2158 {
2159 	int	error, optval;
2160 	long	optval_l;
2161 	struct	linger l;
2162 	struct	timeval tv;
2163 #ifdef INET
2164 	struct accept_filter_arg *afap;
2165 #endif
2166 
2167 	error = 0;
2168 	sopt->sopt_dir = SOPT_GET;
2169 	if (sopt->sopt_level != SOL_SOCKET) {
2170 		if (so->so_proto && so->so_proto->pr_ctloutput) {
2171 			return (so_pr_ctloutput(so, sopt));
2172 		} else
2173 			return (ENOPROTOOPT);
2174 	} else {
2175 		switch (sopt->sopt_name) {
2176 #ifdef INET
2177 		case SO_ACCEPTFILTER:
2178 			if ((so->so_options & SO_ACCEPTCONN) == 0)
2179 				return (EINVAL);
2180 			afap = kmalloc(sizeof(*afap), M_TEMP,
2181 				       M_WAITOK | M_ZERO);
2182 			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
2183 				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
2184 				if (so->so_accf->so_accept_filter_str != NULL)
2185 					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
2186 			}
2187 			error = sooptcopyout(sopt, afap, sizeof(*afap));
2188 			kfree(afap, M_TEMP);
2189 			break;
2190 #endif /* INET */
2191 
2192 		case SO_LINGER:
2193 			l.l_onoff = so->so_options & SO_LINGER;
2194 			l.l_linger = so->so_linger;
2195 			error = sooptcopyout(sopt, &l, sizeof l);
2196 			break;
2197 
2198 		case SO_USELOOPBACK:
2199 		case SO_DONTROUTE:
2200 		case SO_DEBUG:
2201 		case SO_KEEPALIVE:
2202 		case SO_REUSEADDR:
2203 		case SO_REUSEPORT:
2204 		case SO_BROADCAST:
2205 		case SO_OOBINLINE:
2206 		case SO_TIMESTAMP:
2207 		case SO_NOSIGPIPE:
2208 			optval = so->so_options & sopt->sopt_name;
2209 integer:
2210 			error = sooptcopyout(sopt, &optval, sizeof optval);
2211 			break;
2212 
2213 		case SO_TYPE:
2214 			optval = so->so_type;
2215 			goto integer;
2216 
2217 		case SO_ERROR:
2218 			optval = so->so_error;
2219 			so->so_error = 0;
2220 			goto integer;
2221 
2222 		case SO_SNDBUF:
2223 			optval = so->so_snd.ssb_hiwat;
2224 			goto integer;
2225 
2226 		case SO_RCVBUF:
2227 			optval = so->so_rcv.ssb_hiwat;
2228 			goto integer;
2229 
2230 		case SO_SNDLOWAT:
2231 			optval = so->so_snd.ssb_lowat;
2232 			goto integer;
2233 
2234 		case SO_RCVLOWAT:
2235 			optval = so->so_rcv.ssb_lowat;
2236 			goto integer;
2237 
2238 		case SO_SNDTIMEO:
2239 		case SO_RCVTIMEO:
2240 			optval = (sopt->sopt_name == SO_SNDTIMEO ?
2241 				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);
2242 
2243 			tv.tv_sec = optval / hz;
2244 			tv.tv_usec = (optval % hz) * ustick;
2245 			error = sooptcopyout(sopt, &tv, sizeof tv);
2246 			break;
2247 
2248 		case SO_SNDSPACE:
2249 			optval_l = ssb_space(&so->so_snd);
2250 			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
2251 			break;
2252 
2253 		default:
2254 			error = ENOPROTOOPT;
2255 			break;
2256 		}
2257 		return (error);
2258 	}
2259 }
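
/*
 * Illustrative sketch: SO_ERROR is read-and-clear (so_error is zeroed
 * above once sampled).  The classic userland use is harvesting the
 * deferred result of a non-blocking connect(2):
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len);
 *	- err is e.g. ECONNREFUSED on failure, or 0 on success
 */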
2260 
2261 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
2262 int
2263 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2264 {
2265 	struct mbuf *m, *m_prev;
2266 	int sopt_size = sopt->sopt_valsize, msize;
2267 
2268 	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
2269 		   0, &msize);
2270 	if (m == NULL)
2271 		return (ENOBUFS);
2272 	m->m_len = min(msize, sopt_size);
2273 	sopt_size -= m->m_len;
2274 	*mp = m;
2275 	m_prev = m;
2276 
2277 	while (sopt_size > 0) {
2278 		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
2279 			   MT_DATA, 0, &msize);
2280 		if (m == NULL) {
2281 			m_freem(*mp);
2282 			return (ENOBUFS);
2283 		}
2284 		m->m_len = min(msize, sopt_size);
2285 		sopt_size -= m->m_len;
2286 		m_prev->m_next = m;
2287 		m_prev = m;
2288 	}
2289 	return (0);
2290 }
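
/*
 * Worked example of the chain construction above, assuming m_getl()
 * hands back 2048-byte clusters: a 5000-byte option value becomes a
 * three-mbuf chain of 2048 + 2048 + 904 bytes, each m_len clamped by
 * min(msize, sopt_size) as the residual count drains to zero.
 */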
2291 
2292 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2293 int
2294 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2295 {
2296 	soopt_to_mbuf(sopt, m);
2297 	return 0;
2298 }
2299 
2300 void
2301 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
2302 {
2303 	size_t valsize;
2304 	void *val;
2305 
2306 	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
2307 	KKASSERT(kva_p(m));
2308 	if (sopt->sopt_val == NULL)
2309 		return;
2310 	val = sopt->sopt_val;
2311 	valsize = sopt->sopt_valsize;
2312 	while (m != NULL && valsize >= m->m_len) {
2313 		bcopy(val, mtod(m, char *), m->m_len);
2314 		valsize -= m->m_len;
2315 		val = (caddr_t)val + m->m_len;
2316 		m = m->m_next;
2317 	}
2318 	if (m != NULL) /* ip6_sooptmcopyin() should have allocated enough */
2319 		panic("ip6_sooptmcopyin");
2320 }
2321 
2322 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2323 int
2324 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2325 {
2326 	return soopt_from_mbuf(sopt, m);
2327 }
2328 
2329 int
2330 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
2331 {
2332 	struct mbuf *m0 = m;
2333 	size_t valsize = 0;
2334 	size_t maxsize;
2335 	void *val;
2336 
2337 	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
2338 	KKASSERT(kva_p(m));
2339 	if (sopt->sopt_val == NULL)
2340 		return 0;
2341 	val = sopt->sopt_val;
2342 	maxsize = sopt->sopt_valsize;
2343 	while (m != NULL && maxsize >= m->m_len) {
2344 		bcopy(mtod(m, char *), val, m->m_len);
2345 		maxsize -= m->m_len;
2346 		val = (caddr_t)val + m->m_len;
2347 		valsize += m->m_len;
2348 		m = m->m_next;
2349 	}
2350 	if (m != NULL) {
2351 		/* user-land should have supplied a large enough buffer */
2352 		m_freem(m0);
2353 		return (EINVAL);
2354 	}
2355 	sopt->sopt_valsize = valsize;
2356 	return 0;
2357 }
2358 
2359 void
2360 sohasoutofband(struct socket *so)
2361 {
2362 	if (so->so_sigio != NULL)
2363 		pgsigio(so->so_sigio, SIGURG, 0);
2364 	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
2365 }
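
/*
 * Illustrative note: the SIGURG side only fires if a recipient was
 * registered on the socket.  A userland sketch:
 *
 *	fcntl(s, F_SETOWN, getpid());	- direct SIGURG at this process
 *	signal(SIGURG, urg_handler);	- hypothetical handler
 *
 * kqueue consumers instead observe the NOTE_OOB activation posted
 * above via an EVFILT_EXCEPT filter.
 */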
2366 
2367 int
2368 sokqfilter(struct file *fp, struct knote *kn)
2369 {
2370 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2371 	struct signalsockbuf *ssb;
2372 
2373 	switch (kn->kn_filter) {
2374 	case EVFILT_READ:
2375 		if (so->so_options & SO_ACCEPTCONN)
2376 			kn->kn_fop = &solisten_filtops;
2377 		else
2378 			kn->kn_fop = &soread_filtops;
2379 		ssb = &so->so_rcv;
2380 		break;
2381 	case EVFILT_WRITE:
2382 		kn->kn_fop = &sowrite_filtops;
2383 		ssb = &so->so_snd;
2384 		break;
2385 	case EVFILT_EXCEPT:
2386 		kn->kn_fop = &soexcept_filtops;
2387 		ssb = &so->so_rcv;
2388 		break;
2389 	default:
2390 		return (EOPNOTSUPP);
2391 	}
2392 
2393 	knote_insert(&ssb->ssb_kq.ki_note, kn);
2394 	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
2395 	return (0);
2396 }
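
/*
 * Illustrative userland sketch of reaching this attach code: register
 * for read readiness with an explicit low-water mark via kqueue(2).
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * With NOTE_LOWAT set, filt_soread() below compares the buffered byte
 * count against the caller's 128-byte threshold (kn_sdata) instead of
 * the socket's ssb_lowat.
 */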
2397 
2398 static void
2399 filt_sordetach(struct knote *kn)
2400 {
2401 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2402 
2403 	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
2404 	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
2405 		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
2406 }
2407 
2408 /*ARGSUSED*/
2409 static int
2410 filt_soread(struct knote *kn, long hint)
2411 {
2412 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2413 
2414 	if (kn->kn_sfflags & NOTE_OOB) {
2415 		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
2416 			kn->kn_fflags |= NOTE_OOB;
2417 			return (1);
2418 		}
2419 		return (0);
2420 	}
2421 	kn->kn_data = so->so_rcv.ssb_cc;
2422 
2423 	if (so->so_state & SS_CANTRCVMORE) {
2424 		/*
2425 		 * Only set NODATA if all data has been exhausted.
2426 		 */
2427 		if (kn->kn_data == 0)
2428 			kn->kn_flags |= EV_NODATA;
2429 		kn->kn_flags |= EV_EOF;
2430 		kn->kn_fflags = so->so_error;
2431 		return (1);
2432 	}
2433 	if (so->so_error)	/* temporary udp error */
2434 		return (1);
2435 	if (kn->kn_sfflags & NOTE_LOWAT)
2436 		return (kn->kn_data >= kn->kn_sdata);
2437 	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
2438 		!TAILQ_EMPTY(&so->so_comp));
2439 }
2440 
2441 static void
2442 filt_sowdetach(struct knote *kn)
2443 {
2444 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2445 
2446 	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
2447 	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
2448 		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
2449 }
2450 
2451 /*ARGSUSED*/
2452 static int
2453 filt_sowrite(struct knote *kn, long hint)
2454 {
2455 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2456 
2457 	kn->kn_data = ssb_space(&so->so_snd);
2458 	if (so->so_state & SS_CANTSENDMORE) {
2459 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2460 		kn->kn_fflags = so->so_error;
2461 		return (1);
2462 	}
2463 	if (so->so_error)	/* temporary udp error */
2464 		return (1);
2465 	if (((so->so_state & SS_ISCONNECTED) == 0) &&
2466 	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
2467 		return (0);
2468 	if (kn->kn_sfflags & NOTE_LOWAT)
2469 		return (kn->kn_data >= kn->kn_sdata);
2470 	return (kn->kn_data >= so->so_snd.ssb_lowat);
2471 }
2472 
2473 /*ARGSUSED*/
2474 static int
2475 filt_solisten(struct knote *kn, long hint)
2476 {
2477 	struct socket *so = (struct socket *)kn->kn_fp->f_data;
2478 
2479 	kn->kn_data = so->so_qlen;
2480 	return (!TAILQ_EMPTY(&so->so_comp));
2481 }
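
/*
 * Illustrative sketch: on a listening socket EVFILT_READ resolves to
 * filt_solisten() (see sokqfilter() above), so a kevent(2) consumer
 * wakes when a completed connection is ready for accept(2), with
 * kn_data reporting the completed-queue length:
 *
 *	EV_SET(&kev, lsock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	- on return, kev.data == number of connections ready to accept
 */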
2482