1 /*	$OpenBSD: tcp_usrreq.c,v 1.174 2020/08/01 23:41:55 gnezdo Exp $	*/
2 /*	$NetBSD: tcp_usrreq.c,v 1.20 1996/02/13 23:44:16 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
33  *
34  * NRL grants permission for redistribution and use in source and binary
35  * forms, with or without modification, of the software and documentation
36  * created at NRL provided that the following conditions are met:
37  *
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. All advertising materials mentioning features or use of this software
44  *    must display the following acknowledgements:
45  *	This product includes software developed by the University of
46  *	California, Berkeley and its contributors.
47  *	This product includes software developed at the Information
48  *	Technology Division, US Naval Research Laboratory.
49  * 4. Neither the name of the NRL nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
57  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  *
65  * The views and conclusions contained in the software and documentation
66  * are those of the authors and should not be interpreted as representing
67  * official policies, either expressed or implied, of the US Naval
68  * Research Laboratory (NRL).
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/mbuf.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/protosw.h>
77 #include <sys/stat.h>
78 #include <sys/sysctl.h>
79 #include <sys/domain.h>
80 #include <sys/kernel.h>
81 #include <sys/pool.h>
82 
83 #include <net/if.h>
84 #include <net/if_var.h>
85 #include <net/route.h>
86 
87 #include <netinet/in.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/ip_var.h>
92 #include <netinet/tcp.h>
93 #include <netinet/tcp_fsm.h>
94 #include <netinet/tcp_seq.h>
95 #include <netinet/tcp_timer.h>
96 #include <netinet/tcp_var.h>
97 #include <netinet/tcp_debug.h>
98 
99 #ifdef INET6
100 #include <netinet6/in6_var.h>
101 #endif
102 
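/*
 * Initial socket buffer sizes and the receive-buffer auto-scaling
 * increment; see tcp_update_sndspace() and tcp_update_rcvspace() at the
 * end of this file.
 */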
103 #ifndef TCP_SENDSPACE
104 #define	TCP_SENDSPACE	1024*16
105 #endif
106 u_int	tcp_sendspace = TCP_SENDSPACE;
107 #ifndef TCP_RECVSPACE
108 #define	TCP_RECVSPACE	1024*16
109 #endif
110 u_int	tcp_recvspace = TCP_RECVSPACE;
111 u_int	tcp_autorcvbuf_inc = 16 * 1024;
112 
113 int *tcpctl_vars[TCPCTL_MAXID] = TCPCTL_VARS;
114 
115 struct	inpcbtable tcbtable;
116 
117 int tcp_ident(void *, size_t *, void *, size_t, int);
118 
119 /*
120  * Process a TCP user request for TCP tb.  If this is a send request
121  * then m is the mbuf chain of send data.  If this is a timer expiration
122  * (called from the software clock routine), then timertype tells which timer.
123  */
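/*
 * Called from the socket layer through the protocol switch (pr_usrreq)
 * for requests such as PRU_BIND, PRU_CONNECT, PRU_SEND and PRU_SHUTDOWN;
 * except for PRU_CONTROL the caller is expected to hold the socket lock
 * (see soassertlocked() below).
 */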
124 /*ARGSUSED*/
125 int
126 tcp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
127     struct mbuf *control, struct proc *p)
128 {
129 	struct inpcb *inp;
130 	struct tcpcb *otp = NULL, *tp = NULL;
131 	int error = 0;
132 	short ostate;
133 
134 	if (req == PRU_CONTROL) {
135 #ifdef INET6
136 		if (sotopf(so) == PF_INET6)
137 			return in6_control(so, (u_long)m, (caddr_t)nam,
138 			    (struct ifnet *)control);
139 		else
140 #endif /* INET6 */
141 			return (in_control(so, (u_long)m, (caddr_t)nam,
142 			    (struct ifnet *)control));
143 	}
144 
145 	soassertlocked(so);
146 
147 	if (control && control->m_len) {
148 		error = EINVAL;
149 		goto release;
150 	}
151 
152 	inp = sotoinpcb(so);
153 	/*
154 	 * When a TCP is attached to a socket, then there will be
155 	 * a (struct inpcb) pointed at by the socket, and this
156 	 * structure will point at a subsidiary (struct tcpcb).
157 	 */
158 	if (inp == NULL) {
159 		error = so->so_error;
160 		if (error == 0)
161 			error = EINVAL;
162 		goto release;
163 	}
164 	tp = intotcpcb(inp);
165 	/* tp might be NULL when using socket splicing */
166 	if (tp == NULL)
167 		goto release;
168 	if (so->so_options & SO_DEBUG) {
169 		otp = tp;
170 		ostate = tp->t_state;
171 	}
172 
173 	switch (req) {
174 
175 	/*
176 	 * Give the socket an address.
177 	 */
178 	case PRU_BIND:
179 		error = in_pcbbind(inp, nam, p);
180 		break;
181 
182 	/*
183 	 * Prepare to accept connections.
184 	 */
185 	case PRU_LISTEN:
186 		if (inp->inp_lport == 0)
187 			error = in_pcbbind(inp, NULL, p);
188 		/* If the in_pcbbind() above is called, the tp->pf
189 		   should still be whatever it was before. */
190 		if (error == 0)
191 			tp->t_state = TCPS_LISTEN;
192 		break;
193 
194 	/*
195 	 * Initiate connection to peer.
196 	 * Create a template for use in transmissions on this connection.
197 	 * Enter SYN_SENT state, and mark socket as connecting.
198 	 * Start keep-alive timer, and seed output sequence space.
199 	 * Send initial segment on connection.
200 	 */
201 	case PRU_CONNECT:
202 #ifdef INET6
203 		if (inp->inp_flags & INP_IPV6) {
204 			struct sockaddr_in6 *sin6;
205 
206 			if ((error = in6_nam2sin6(nam, &sin6)))
207 				break;
208 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
209 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
210 				error = EINVAL;
211 				break;
212 			}
213 			error = in6_pcbconnect(inp, nam);
214 		} else
215 #endif /* INET6 */
216 		{
217 			struct sockaddr_in *sin;
218 
219 			if ((error = in_nam2sin(nam, &sin)))
220 				break;
221 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
222 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
223 			    IN_MULTICAST(sin->sin_addr.s_addr) ||
224 			    in_broadcast(sin->sin_addr, inp->inp_rtableid)) {
225 				error = EINVAL;
226 				break;
227 			}
228 			error = in_pcbconnect(inp, nam);
229 		}
230 		if (error)
231 			break;
232 
233 		tp->t_template = tcp_template(tp);
234 		if (tp->t_template == 0) {
235 			in_pcbdisconnect(inp);
236 			error = ENOBUFS;
237 			break;
238 		}
239 
240 		so->so_state |= SS_CONNECTOUT;
241 
242 		/* Compute window scaling to request.  */
243 		tcp_rscale(tp, sb_max);
244 
245 		soisconnecting(so);
246 		tcpstat_inc(tcps_connattempt);
247 		tp->t_state = TCPS_SYN_SENT;
248 		TCP_TIMER_ARM(tp, TCPT_KEEP, tcptv_keep_init);
249 		tcp_set_iss_tsm(tp);
250 		tcp_sendseqinit(tp);
251 		tp->snd_last = tp->snd_una;
252 		error = tcp_output(tp);
253 		break;
254 
255 	/*
256 	 * Create a TCP connection between two sockets.
257 	 */
258 	case PRU_CONNECT2:
259 		error = EOPNOTSUPP;
260 		break;
261 
262 	/*
263 	 * Initiate disconnect from peer.
264 	 * If connection never passed embryonic stage, just drop;
265 	 * else if don't need to let data drain, then can just drop anyways,
266 	 * else have to begin TCP shutdown process: mark socket disconnecting,
267 	 * drain unread data, state switch to reflect user close, and
268 	 * send segment (e.g. FIN) to peer.  Socket will be really disconnected
269 	 * when peer sends FIN and acks ours.
270 	 *
271 	 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
272 	 */
273 	case PRU_DISCONNECT:
274 		tp = tcp_disconnect(tp);
275 		break;
276 
277 	/*
278 	 * Accept a connection.  Essentially all the work is
279 	 * done at higher levels; just return the address
280 	 * of the peer, storing through addr.
281 	 */
282 	case PRU_ACCEPT:
283 #ifdef INET6
284 		if (inp->inp_flags & INP_IPV6)
285 			in6_setpeeraddr(inp, nam);
286 		else
287 #endif
288 			in_setpeeraddr(inp, nam);
289 		break;
290 
291 	/*
292 	 * Mark the connection as being incapable of further output.
293 	 */
294 	case PRU_SHUTDOWN:
295 		if (so->so_state & SS_CANTSENDMORE)
296 			break;
297 		socantsendmore(so);
298 		tp = tcp_usrclosed(tp);
299 		if (tp)
300 			error = tcp_output(tp);
301 		break;
302 
303 	/*
304 	 * After a receive, possibly send window update to peer.
305 	 */
306 	case PRU_RCVD:
307 		/*
308 		 * soreceive() calls this function when a user receives
309 		 * ancillary data on a listening socket. We don't call
310 		 * tcp_output in such a case, since there is no header
311 		 * template for a listening socket and hence the kernel
312 		 * will panic.
313 		 */
314 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) != 0)
315 			(void) tcp_output(tp);
316 		break;
317 
318 	/*
319 	 * Do a send by putting data in output queue and updating urgent
320 	 * marker if URG set.  Possibly send more data.
321 	 */
322 	case PRU_SEND:
323 		sbappendstream(so, &so->so_snd, m);
324 		error = tcp_output(tp);
325 		break;
326 
327 	/*
328 	 * Abort the TCP.
329 	 */
330 	case PRU_ABORT:
331 		tp = tcp_drop(tp, ECONNABORTED);
332 		break;
333 
334 	case PRU_SENSE:
335 		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
336 		break;
337 
338 	case PRU_RCVOOB:
339 		if ((so->so_oobmark == 0 &&
340 		    (so->so_state & SS_RCVATMARK) == 0) ||
341 		    so->so_options & SO_OOBINLINE ||
342 		    tp->t_oobflags & TCPOOB_HADDATA) {
343 			error = EINVAL;
344 			break;
345 		}
346 		if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
347 			error = EWOULDBLOCK;
348 			break;
349 		}
350 		m->m_len = 1;
351 		*mtod(m, caddr_t) = tp->t_iobc;
352 		if (((long)nam & MSG_PEEK) == 0)
353 			tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
354 		break;
355 
356 	case PRU_SENDOOB:
357 		if (sbspace(so, &so->so_snd) < -512) {
358 			m_freem(m);
359 			error = ENOBUFS;
360 			break;
361 		}
362 		/*
363 		 * According to RFC961 (Assigned Protocols),
364 		 * the urgent pointer points to the last octet
365 		 * of urgent data.  We continue, however,
366 		 * to consider it to indicate the first octet
367 		 * of data past the urgent section.
368 		 * Otherwise, snd_up should be one lower.
369 		 */
370 		sbappendstream(so, &so->so_snd, m);
371 		tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
372 		tp->t_force = 1;
373 		error = tcp_output(tp);
374 		tp->t_force = 0;
375 		break;
376 
377 	case PRU_SOCKADDR:
378 #ifdef INET6
379 		if (inp->inp_flags & INP_IPV6)
380 			in6_setsockaddr(inp, nam);
381 		else
382 #endif
383 			in_setsockaddr(inp, nam);
384 		break;
385 
386 	case PRU_PEERADDR:
387 #ifdef INET6
388 		if (inp->inp_flags & INP_IPV6)
389 			in6_setpeeraddr(inp, nam);
390 		else
391 #endif
392 			in_setpeeraddr(inp, nam);
393 		break;
394 
395 	default:
396 		panic("tcp_usrreq");
397 	}
398 	if (otp)
399 		tcp_trace(TA_USER, ostate, tp, otp, NULL, req, 0);
400 	return (error);
401 
402  release:
403 	if (req != PRU_RCVD && req != PRU_RCVOOB && req != PRU_SENSE) {
404 		m_freem(control);
405 		m_freem(m);
406 	}
407 	return (error);
408 }
409 
410 int
411 tcp_ctloutput(int op, struct socket *so, int level, int optname,
412     struct mbuf *m)
413 {
414 	int error = 0;
415 	struct inpcb *inp;
416 	struct tcpcb *tp;
417 	int i;
418 
419 	inp = sotoinpcb(so);
420 	if (inp == NULL)
421 		return (ECONNRESET);
422 	if (level != IPPROTO_TCP) {
423 		switch (so->so_proto->pr_domain->dom_family) {
424 #ifdef INET6
425 		case PF_INET6:
426 			error = ip6_ctloutput(op, so, level, optname, m);
427 			break;
428 #endif /* INET6 */
429 		case PF_INET:
430 			error = ip_ctloutput(op, so, level, optname, m);
431 			break;
432 		default:
433 			error = EAFNOSUPPORT;	/*?*/
434 			break;
435 		}
436 		return (error);
437 	}
438 	tp = intotcpcb(inp);
439 
440 	switch (op) {
441 
442 	case PRCO_SETOPT:
443 		switch (optname) {
444 
445 		case TCP_NODELAY:
446 			if (m == NULL || m->m_len < sizeof (int))
447 				error = EINVAL;
448 			else if (*mtod(m, int *))
449 				tp->t_flags |= TF_NODELAY;
450 			else
451 				tp->t_flags &= ~TF_NODELAY;
452 			break;
453 
454 		case TCP_NOPUSH:
455 			if (m == NULL || m->m_len < sizeof (int))
456 				error = EINVAL;
457 			else if (*mtod(m, int *))
458 				tp->t_flags |= TF_NOPUSH;
459 			else if (tp->t_flags & TF_NOPUSH) {
460 				tp->t_flags &= ~TF_NOPUSH;
461 				if (TCPS_HAVEESTABLISHED(tp->t_state))
462 					error = tcp_output(tp);
463 			}
464 			break;
465 
466 		case TCP_MAXSEG:
467 			if (m == NULL || m->m_len < sizeof (int)) {
468 				error = EINVAL;
469 				break;
470 			}
471 
472 			i = *mtod(m, int *);
473 			if (i > 0 && i <= tp->t_maxseg)
474 				tp->t_maxseg = i;
475 			else
476 				error = EINVAL;
477 			break;
478 
479 		case TCP_SACK_ENABLE:
480 			if (m == NULL || m->m_len < sizeof (int)) {
481 				error = EINVAL;
482 				break;
483 			}
484 
485 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
486 				error = EPERM;
487 				break;
488 			}
489 
490 			if (tp->t_flags & TF_SIGNATURE) {
491 				error = EPERM;
492 				break;
493 			}
494 
495 			if (*mtod(m, int *))
496 				tp->sack_enable = 1;
497 			else
498 				tp->sack_enable = 0;
499 			break;
500 #ifdef TCP_SIGNATURE
501 		case TCP_MD5SIG:
502 			if (m == NULL || m->m_len < sizeof (int)) {
503 				error = EINVAL;
504 				break;
505 			}
506 
507 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
508 				error = EPERM;
509 				break;
510 			}
511 
512 			if (*mtod(m, int *)) {
513 				tp->t_flags |= TF_SIGNATURE;
514 				tp->sack_enable = 0;
515 			} else
516 				tp->t_flags &= ~TF_SIGNATURE;
517 			break;
518 #endif /* TCP_SIGNATURE */
519 		default:
520 			error = ENOPROTOOPT;
521 			break;
522 		}
523 		break;
524 
525 	case PRCO_GETOPT:
526 		m->m_len = sizeof(int);
527 
528 		switch (optname) {
529 		case TCP_NODELAY:
530 			*mtod(m, int *) = tp->t_flags & TF_NODELAY;
531 			break;
532 		case TCP_NOPUSH:
533 			*mtod(m, int *) = tp->t_flags & TF_NOPUSH;
534 			break;
535 		case TCP_MAXSEG:
536 			*mtod(m, int *) = tp->t_maxseg;
537 			break;
538 		case TCP_SACK_ENABLE:
539 			*mtod(m, int *) = tp->sack_enable;
540 			break;
541 #ifdef TCP_SIGNATURE
542 		case TCP_MD5SIG:
543 			*mtod(m, int *) = tp->t_flags & TF_SIGNATURE;
544 			break;
545 #endif
546 		default:
547 			error = ENOPROTOOPT;
548 			break;
549 		}
550 		break;
551 	}
552 	return (error);
553 }
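/*
 * Example of what reaches the PRCO_SETOPT case above: a userland call
 * such as
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 *
 * arrives here through the protocol switch, with the option value already
 * copied into the mbuf m.
 */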
554 
555 /*
556  * Attach TCP protocol to socket, allocating
557  * internet protocol control block, tcp control block,
558  * buffer space.  The new connection starts out in the CLOSED state.
559  */
560 int
561 tcp_attach(struct socket *so, int proto)
562 {
563 	struct tcpcb *tp;
564 	struct inpcb *inp;
565 	int error;
566 
567 	if (so->so_pcb)
568 		return EISCONN;
569 	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0 ||
570 	    sbcheckreserve(so->so_snd.sb_wat, tcp_sendspace) ||
571 	    sbcheckreserve(so->so_rcv.sb_wat, tcp_recvspace)) {
572 		error = soreserve(so, tcp_sendspace, tcp_recvspace);
573 		if (error)
574 			return (error);
575 	}
576 
577 	NET_ASSERT_LOCKED();
578 	error = in_pcballoc(so, &tcbtable);
579 	if (error)
580 		return (error);
581 	inp = sotoinpcb(so);
582 	tp = tcp_newtcpcb(inp);
583 	if (tp == NULL) {
584 		unsigned int nofd = so->so_state & SS_NOFDREF;	/* XXX */
585 
586 		so->so_state &= ~SS_NOFDREF;	/* don't free the socket yet */
587 		in_pcbdetach(inp);
588 		so->so_state |= nofd;
589 		return (ENOBUFS);
590 	}
591 	tp->t_state = TCPS_CLOSED;
592 #ifdef INET6
593 	/* we disallow IPv4-mapped addresses completely. */
594 	if (inp->inp_flags & INP_IPV6)
595 		tp->pf = PF_INET6;
596 	else
597 		tp->pf = PF_INET;
598 #else
599 	tp->pf = PF_INET;
600 #endif
601 	if ((so->so_options & SO_LINGER) && so->so_linger == 0)
602 		so->so_linger = TCP_LINGERTIME;
603 
604 	if (so->so_options & SO_DEBUG)
605 		tcp_trace(TA_USER, TCPS_CLOSED, tp, tp, NULL, PRU_ATTACH, 0);
606 	return (0);
607 }
608 
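/*
 * Detach TCP from the socket (PRU_DETACH): tear the connection down via
 * tcp_disconnect() and trace the request if SO_DEBUG is set.
 */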
609 int
610 tcp_detach(struct socket *so)
611 {
612 	struct inpcb *inp;
613 	struct tcpcb *otp = NULL, *tp = NULL;
614 	int error = 0;
615 	short ostate;
616 
617 	soassertlocked(so);
618 
619 	inp = sotoinpcb(so);
620 	/*
621 	 * When a TCP is attached to a socket, then there will be
622 	 * a (struct inpcb) pointed at by the socket, and this
623 	 * structure will point at a subsidiary (struct tcpcb).
624 	 */
625 	if (inp == NULL) {
626 		error = so->so_error;
627 		if (error == 0)
628 			error = EINVAL;
629 		return (error);
630 	}
631 	tp = intotcpcb(inp);
632 	/* tp might get 0 when using socket splicing */
633 	/* tp might be NULL when using socket splicing */
634 		return (0);
635 	if (so->so_options & SO_DEBUG) {
636 		otp = tp;
637 		ostate = tp->t_state;
638 	}
639 
640 	/*
641 	 * Detach the TCP protocol from the socket.
642 	 * If the protocol state is non-embryonic, then can't
643 	 * do this directly: have to initiate a PRU_DISCONNECT,
644 	 * which may finish later; embryonic TCB's can just
645 	 * be discarded here.
646 	 */
647 	tp = tcp_disconnect(tp);
648 
649 	if (otp)
650 		tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_DETACH, 0);
651 	return (error);
652 }
653 
654 /*
655  * Initiate (or continue) disconnect.
656  * If embryonic state, just send reset (once).
657  * If in ``let data drain'' option and linger null, just drop.
658  * Otherwise (hard), mark socket disconnecting and drop
659  * current input data; switch states based on user close, and
660  * send segment to peer (with FIN).
661  */
662 struct tcpcb *
663 tcp_disconnect(struct tcpcb *tp)
664 {
665 	struct socket *so = tp->t_inpcb->inp_socket;
666 
667 	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
668 		tp = tcp_close(tp);
669 	else if ((so->so_options & SO_LINGER) && so->so_linger == 0)
670 		tp = tcp_drop(tp, 0);
671 	else {
672 		soisdisconnecting(so);
673 		sbflush(so, &so->so_rcv);
674 		tp = tcp_usrclosed(tp);
675 		if (tp)
676 			(void) tcp_output(tp);
677 	}
678 	return (tp);
679 }
680 
681 /*
682  * User issued close, and wish to trail through shutdown states:
683  * if never received SYN, just forget it.  If got a SYN from peer,
684  * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
685  * If already got a FIN from peer, then almost done; go to LAST_ACK
686  * state.  In all other cases, have already sent FIN to peer (e.g.
687  * after PRU_SHUTDOWN), and just have to play tedious game waiting
688  * for peer to send FIN or not respond to keep-alives, etc.
689  * We can let the user exit from the close as soon as the FIN is acked.
690  */
691 struct tcpcb *
692 tcp_usrclosed(struct tcpcb *tp)
693 {
694 
695 	switch (tp->t_state) {
696 
697 	case TCPS_CLOSED:
698 	case TCPS_LISTEN:
699 	case TCPS_SYN_SENT:
700 		tp->t_state = TCPS_CLOSED;
701 		tp = tcp_close(tp);
702 		break;
703 
704 	case TCPS_SYN_RECEIVED:
705 	case TCPS_ESTABLISHED:
706 		tp->t_state = TCPS_FIN_WAIT_1;
707 		break;
708 
709 	case TCPS_CLOSE_WAIT:
710 		tp->t_state = TCPS_LAST_ACK;
711 		break;
712 	}
713 	if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
714 		soisdisconnected(tp->t_inpcb->inp_socket);
715 		/*
716 		 * If we are in FIN_WAIT_2, we arrived here because the
717 		 * application did a shutdown of the send side.  Like the
718 		 * case of a transition from FIN_WAIT_1 to FIN_WAIT_2 after
719 		 * a full close, we start a timer to make sure sockets are
720 		 * not left in FIN_WAIT_2 forever.
721 		 */
722 		if (tp->t_state == TCPS_FIN_WAIT_2)
723 			TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_maxidle);
724 	}
725 	return (tp);
726 }
727 
728 /*
729  * Look up a socket for ident or tcpdrop, ...
730  */
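/*
 * Reached through the TCPCTL_IDENT and TCPCTL_DROP sysctls (see
 * tcp_sysctl() below), e.g. by identd(8) and tcpdrop(8).  The caller
 * passes a struct tcp_ident_mapping naming the foreign and local
 * endpoints; for ident the matching socket's ruid/euid are returned,
 * for drop the connection is aborted.
 */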
731 int
732 tcp_ident(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int dodrop)
733 {
734 	int error = 0;
735 	struct tcp_ident_mapping tir;
736 	struct inpcb *inp;
737 	struct tcpcb *tp = NULL;
738 	struct sockaddr_in *fin, *lin;
739 #ifdef INET6
740 	struct sockaddr_in6 *fin6, *lin6;
741 	struct in6_addr f6, l6;
742 #endif
743 
744 	NET_ASSERT_LOCKED();
745 
746 	if (dodrop) {
747 		if (oldp != NULL || *oldlenp != 0)
748 			return (EINVAL);
749 		if (newp == NULL)
750 			return (EPERM);
751 		if (newlen < sizeof(tir))
752 			return (ENOMEM);
753 		if ((error = copyin(newp, &tir, sizeof (tir))) != 0 )
754 			return (error);
755 	} else {
756 		if (oldp == NULL)
757 			return (EINVAL);
758 		if (*oldlenp < sizeof(tir))
759 			return (ENOMEM);
760 		if (newp != NULL || newlen != 0)
761 			return (EINVAL);
762 		if ((error = copyin(oldp, &tir, sizeof (tir))) != 0 )
763 			return (error);
764 	}
765 	switch (tir.faddr.ss_family) {
766 #ifdef INET6
767 	case AF_INET6:
768 		fin6 = (struct sockaddr_in6 *)&tir.faddr;
769 		error = in6_embedscope(&f6, fin6, NULL);
770 		if (error)
771 			return EINVAL;	/*?*/
772 		lin6 = (struct sockaddr_in6 *)&tir.laddr;
773 		error = in6_embedscope(&l6, lin6, NULL);
774 		if (error)
775 			return EINVAL;	/*?*/
776 		break;
777 #endif
778 	case AF_INET:
779 		fin = (struct sockaddr_in *)&tir.faddr;
780 		lin = (struct sockaddr_in *)&tir.laddr;
781 		break;
782 	default:
783 		return (EINVAL);
784 	}
785 
786 	switch (tir.faddr.ss_family) {
787 #ifdef INET6
788 	case AF_INET6:
789 		inp = in6_pcbhashlookup(&tcbtable, &f6,
790 		    fin6->sin6_port, &l6, lin6->sin6_port, tir.rdomain);
791 		break;
792 #endif
793 	case AF_INET:
794 		inp = in_pcbhashlookup(&tcbtable, fin->sin_addr,
795 		    fin->sin_port, lin->sin_addr, lin->sin_port, tir.rdomain);
796 		break;
797 	default:
798 		unhandled_af(tir.faddr.ss_family);
799 	}
800 
801 	if (dodrop) {
802 		if (inp && (tp = intotcpcb(inp)) &&
803 		    ((inp->inp_socket->so_options & SO_ACCEPTCONN) == 0))
804 			tp = tcp_drop(tp, ECONNABORTED);
805 		else
806 			error = ESRCH;
807 		return (error);
808 	}
809 
810 	if (inp == NULL) {
811 		tcpstat_inc(tcps_pcbhashmiss);
812 		switch (tir.faddr.ss_family) {
813 #ifdef INET6
814 		case AF_INET6:
815 			inp = in6_pcblookup_listen(&tcbtable,
816 			    &l6, lin6->sin6_port, NULL, tir.rdomain);
817 			break;
818 #endif
819 		case AF_INET:
820 			inp = in_pcblookup_listen(&tcbtable,
821 			    lin->sin_addr, lin->sin_port, NULL, tir.rdomain);
822 			break;
823 		}
824 	}
825 
826 	if (inp != NULL && (inp->inp_socket->so_state & SS_CONNECTOUT)) {
827 		tir.ruid = inp->inp_socket->so_ruid;
828 		tir.euid = inp->inp_socket->so_euid;
829 	} else {
830 		tir.ruid = -1;
831 		tir.euid = -1;
832 	}
833 
834 	*oldlenp = sizeof (tir);
835 	error = copyout((void *)&tir, oldp, sizeof (tir));
836 	return (error);
837 }
838 
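/*
 * Export the TCP statistics counters as a struct tcpstat, e.g. for
 * netstat(1) -s, via the net.inet.tcp.stats sysctl (TCPCTL_STATS below).
 */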
839 int
840 tcp_sysctl_tcpstat(void *oldp, size_t *oldlenp, void *newp)
841 {
842 	uint64_t counters[tcps_ncounters];
843 	struct tcpstat tcpstat;
844 	struct syn_cache_set *set;
845 	int i = 0;
846 
847 #define ASSIGN(field)	do { tcpstat.field = counters[i++]; } while (0)
848 
849 	memset(&tcpstat, 0, sizeof tcpstat);
850 	counters_read(tcpcounters, counters, nitems(counters));
851 	ASSIGN(tcps_connattempt);
852 	ASSIGN(tcps_accepts);
853 	ASSIGN(tcps_connects);
854 	ASSIGN(tcps_drops);
855 	ASSIGN(tcps_conndrops);
856 	ASSIGN(tcps_closed);
857 	ASSIGN(tcps_segstimed);
858 	ASSIGN(tcps_rttupdated);
859 	ASSIGN(tcps_delack);
860 	ASSIGN(tcps_timeoutdrop);
861 	ASSIGN(tcps_rexmttimeo);
862 	ASSIGN(tcps_persisttimeo);
863 	ASSIGN(tcps_persistdrop);
864 	ASSIGN(tcps_keeptimeo);
865 	ASSIGN(tcps_keepprobe);
866 	ASSIGN(tcps_keepdrops);
867 	ASSIGN(tcps_sndtotal);
868 	ASSIGN(tcps_sndpack);
869 	ASSIGN(tcps_sndbyte);
870 	ASSIGN(tcps_sndrexmitpack);
871 	ASSIGN(tcps_sndrexmitbyte);
872 	ASSIGN(tcps_sndrexmitfast);
873 	ASSIGN(tcps_sndacks);
874 	ASSIGN(tcps_sndprobe);
875 	ASSIGN(tcps_sndurg);
876 	ASSIGN(tcps_sndwinup);
877 	ASSIGN(tcps_sndctrl);
878 	ASSIGN(tcps_rcvtotal);
879 	ASSIGN(tcps_rcvpack);
880 	ASSIGN(tcps_rcvbyte);
881 	ASSIGN(tcps_rcvbadsum);
882 	ASSIGN(tcps_rcvbadoff);
883 	ASSIGN(tcps_rcvmemdrop);
884 	ASSIGN(tcps_rcvnosec);
885 	ASSIGN(tcps_rcvshort);
886 	ASSIGN(tcps_rcvduppack);
887 	ASSIGN(tcps_rcvdupbyte);
888 	ASSIGN(tcps_rcvpartduppack);
889 	ASSIGN(tcps_rcvpartdupbyte);
890 	ASSIGN(tcps_rcvoopack);
891 	ASSIGN(tcps_rcvoobyte);
892 	ASSIGN(tcps_rcvpackafterwin);
893 	ASSIGN(tcps_rcvbyteafterwin);
894 	ASSIGN(tcps_rcvafterclose);
895 	ASSIGN(tcps_rcvwinprobe);
896 	ASSIGN(tcps_rcvdupack);
897 	ASSIGN(tcps_rcvacktoomuch);
898 	ASSIGN(tcps_rcvacktooold);
899 	ASSIGN(tcps_rcvackpack);
900 	ASSIGN(tcps_rcvackbyte);
901 	ASSIGN(tcps_rcvwinupd);
902 	ASSIGN(tcps_pawsdrop);
903 	ASSIGN(tcps_predack);
904 	ASSIGN(tcps_preddat);
905 	ASSIGN(tcps_pcbhashmiss);
906 	ASSIGN(tcps_noport);
907 	ASSIGN(tcps_badsyn);
908 	ASSIGN(tcps_dropsyn);
909 	ASSIGN(tcps_rcvbadsig);
910 	ASSIGN(tcps_rcvgoodsig);
911 	ASSIGN(tcps_inswcsum);
912 	ASSIGN(tcps_outswcsum);
913 	ASSIGN(tcps_ecn_accepts);
914 	ASSIGN(tcps_ecn_rcvece);
915 	ASSIGN(tcps_ecn_rcvcwr);
916 	ASSIGN(tcps_ecn_rcvce);
917 	ASSIGN(tcps_ecn_sndect);
918 	ASSIGN(tcps_ecn_sndece);
919 	ASSIGN(tcps_ecn_sndcwr);
920 	ASSIGN(tcps_cwr_ecn);
921 	ASSIGN(tcps_cwr_frecovery);
922 	ASSIGN(tcps_cwr_timeout);
923 	ASSIGN(tcps_sc_added);
924 	ASSIGN(tcps_sc_completed);
925 	ASSIGN(tcps_sc_timed_out);
926 	ASSIGN(tcps_sc_overflowed);
927 	ASSIGN(tcps_sc_reset);
928 	ASSIGN(tcps_sc_unreach);
929 	ASSIGN(tcps_sc_bucketoverflow);
930 	ASSIGN(tcps_sc_aborted);
931 	ASSIGN(tcps_sc_dupesyn);
932 	ASSIGN(tcps_sc_dropped);
933 	ASSIGN(tcps_sc_collisions);
934 	ASSIGN(tcps_sc_retransmitted);
935 	ASSIGN(tcps_sc_seedrandom);
936 	ASSIGN(tcps_sc_hash_size);
937 	ASSIGN(tcps_sc_entry_count);
938 	ASSIGN(tcps_sc_entry_limit);
939 	ASSIGN(tcps_sc_bucket_maxlen);
940 	ASSIGN(tcps_sc_bucket_limit);
941 	ASSIGN(tcps_sc_uses_left);
942 	ASSIGN(tcps_conndrained);
943 	ASSIGN(tcps_sack_recovery_episode);
944 	ASSIGN(tcps_sack_rexmits);
945 	ASSIGN(tcps_sack_rexmit_bytes);
946 	ASSIGN(tcps_sack_rcv_opts);
947 	ASSIGN(tcps_sack_snd_opts);
948 	ASSIGN(tcps_sack_drop_opts);
949 
950 #undef ASSIGN
951 
952 	set = &tcp_syn_cache[tcp_syn_cache_active];
953 	tcpstat.tcps_sc_hash_size = set->scs_size;
954 	tcpstat.tcps_sc_entry_count = set->scs_count;
955 	tcpstat.tcps_sc_entry_limit = tcp_syn_cache_limit;
956 	tcpstat.tcps_sc_bucket_maxlen = 0;
957 	for (i = 0; i < set->scs_size; i++) {
958 		if (tcpstat.tcps_sc_bucket_maxlen <
959 		    set->scs_buckethead[i].sch_length)
960 			tcpstat.tcps_sc_bucket_maxlen =
961 				set->scs_buckethead[i].sch_length;
962 	}
963 	tcpstat.tcps_sc_bucket_limit = tcp_syn_bucket_limit;
964 	tcpstat.tcps_sc_uses_left = set->scs_use;
965 
966 	return (sysctl_rdstruct(oldp, oldlenp, newp,
967 	    &tcpstat, sizeof(tcpstat)));
968 }
969 
970 /*
971  * Sysctl for tcp variables.
972  */
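/*
 * Sketch of how a request gets here (assuming the sysctl(3) interface):
 *
 *	int mib[] = { CTL_NET, PF_INET, IPPROTO_TCP, TCPCTL_SACK };
 *	int on = 1;
 *	sysctl(mib, 4, NULL, NULL, &on, sizeof(on));
 *
 * By the time tcp_sysctl() runs, name points at the last component
 * (TCPCTL_SACK) and namelen is 1.
 */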
973 int
974 tcp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
975     size_t newlen)
976 {
977 	int error, nval;
978 
979 	/* All sysctl names at this level are terminal. */
980 	if (namelen != 1)
981 		return (ENOTDIR);
982 
983 	switch (name[0]) {
984 	case TCPCTL_SACK:
985 		NET_LOCK();
986 		error = sysctl_int(oldp, oldlenp, newp, newlen,
987 		    &tcp_do_sack);
988 		NET_UNLOCK();
989 		return (error);
990 
991 	case TCPCTL_SLOWHZ:
992 		return (sysctl_rdint(oldp, oldlenp, newp, PR_SLOWHZ));
993 
994 	case TCPCTL_BADDYNAMIC:
995 		NET_LOCK();
996 		error = sysctl_struct(oldp, oldlenp, newp, newlen,
997 		    baddynamicports.tcp, sizeof(baddynamicports.tcp));
998 		NET_UNLOCK();
999 		return (error);
1000 
1001 	case TCPCTL_ROOTONLY:
1002 		if (newp && securelevel > 0)
1003 			return (EPERM);
1004 		NET_LOCK();
1005 		error = sysctl_struct(oldp, oldlenp, newp, newlen,
1006 		    rootonlyports.tcp, sizeof(rootonlyports.tcp));
1007 		NET_UNLOCK();
1008 		return (error);
1009 
1010 	case TCPCTL_IDENT:
1011 		NET_LOCK();
1012 		error = tcp_ident(oldp, oldlenp, newp, newlen, 0);
1013 		NET_UNLOCK();
1014 		return (error);
1015 
1016 	case TCPCTL_DROP:
1017 		NET_LOCK();
1018 		error = tcp_ident(oldp, oldlenp, newp, newlen, 1);
1019 		NET_UNLOCK();
1020 		return (error);
1021 
1022 	case TCPCTL_ALWAYS_KEEPALIVE:
1023 		NET_LOCK();
1024 		error = sysctl_int(oldp, oldlenp, newp, newlen,
1025 		    &tcp_always_keepalive);
1026 		NET_UNLOCK();
1027 		return (error);
1028 
1029 #ifdef TCP_ECN
1030 	case TCPCTL_ECN:
1031 		NET_LOCK();
1032 		error = sysctl_int(oldp, oldlenp, newp, newlen,
1033 		   &tcp_do_ecn);
1034 		NET_UNLOCK();
1035 		return (error);
1036 #endif
1037 	case TCPCTL_REASS_LIMIT:
1038 		NET_LOCK();
1039 		nval = tcp_reass_limit;
1040 		error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1041 		if (!error && nval != tcp_reass_limit) {
1042 			error = pool_sethardlimit(&tcpqe_pool, nval, NULL, 0);
1043 			if (!error)
1044 				tcp_reass_limit = nval;
1045 		}
1046 		NET_UNLOCK();
1047 		return (error);
1048 
1049 	case TCPCTL_SACKHOLE_LIMIT:
1050 		NET_LOCK();
1051 		nval = tcp_sackhole_limit;
1052 		error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1053 		if (!error && nval != tcp_sackhole_limit) {
1054 			error = pool_sethardlimit(&sackhl_pool, nval, NULL, 0);
1055 			if (!error)
1056 				tcp_sackhole_limit = nval;
1057 		}
1058 		NET_UNLOCK();
1059 		return (error);
1060 
1061 	case TCPCTL_STATS:
1062 		return (tcp_sysctl_tcpstat(oldp, oldlenp, newp));
1063 
1064 	case TCPCTL_SYN_BUCKET_LIMIT:
1065 		NET_LOCK();
1066 		nval = tcp_syn_bucket_limit;
1067 		error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1068 		if (!error && nval != tcp_syn_bucket_limit) {
1069 			if (nval > 0)
1070 				tcp_syn_bucket_limit = nval;
1071 			else
1072 				error = EINVAL;
1073 		}
1074 		NET_UNLOCK();
1075 		return (error);
1076 
1077 	case TCPCTL_SYN_USE_LIMIT:
1078 		NET_LOCK();
1079 		error = sysctl_int(oldp, oldlenp, newp, newlen,
1080 		    &tcp_syn_use_limit);
1081 		if (!error && newp != NULL) {
1082 			/*
1083 			 * The global tcp_syn_use_limit is used when reseeding a
1084 			 * new cache.  Also clamp the value in the existing caches.
1085 			 */
1086 			if (tcp_syn_cache[0].scs_use > tcp_syn_use_limit)
1087 				tcp_syn_cache[0].scs_use = tcp_syn_use_limit;
1088 			if (tcp_syn_cache[1].scs_use > tcp_syn_use_limit)
1089 				tcp_syn_cache[1].scs_use = tcp_syn_use_limit;
1090 		}
1091 		NET_UNLOCK();
1092 		return (error);
1093 
1094 	case TCPCTL_SYN_HASH_SIZE:
1095 		NET_LOCK();
1096 		nval = tcp_syn_hash_size;
1097 		error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1098 		if (!error && nval != tcp_syn_hash_size) {
1099 			if (nval < 1 || nval > 100000) {
1100 				error = EINVAL;
1101 			} else {
1102 				/*
1103 				 * If global hash size has been changed,
1104 				 * switch sets as soon as possible.  Then
1105 				 * the actual hash array will be reallocated.
1106 				 */
1107 				if (tcp_syn_cache[0].scs_size != nval)
1108 					tcp_syn_cache[0].scs_use = 0;
1109 				if (tcp_syn_cache[1].scs_size != nval)
1110 					tcp_syn_cache[1].scs_use = 0;
1111 				tcp_syn_hash_size = nval;
1112 			}
1113 		}
1114 		NET_UNLOCK();
1115 		return (error);
1116 
1117 	default:
1118 		NET_LOCK();
1119 		error = sysctl_int_arr(tcpctl_vars, nitems(tcpctl_vars), name,
1120 		     namelen, oldp, oldlenp, newp, newlen);
1121 		NET_UNLOCK();
1122 		return (error);
1123 	}
1124 	/* NOTREACHED */
1125 }
1126 
1127 /*
1128  * Scale the send buffer so that inflight data is not accounted against
1129  * the limit.  The buffer will scale with the congestion window: if the
1130  * receiver stops acking data, the window will shrink and therefore the
1131  * buffer size will shrink as well.
1132  * In low-memory situations, try to shrink the buffer to the initial size,
1133  * disabling the send buffer scaling as long as the situation persists.
1134  */
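/*
 * Worked example (assuming the default tcp_sendspace of 16k, automatic
 * scaling and no memory shortage): with 24576 bytes in flight
 * (snd_max - snd_una), nmax becomes 16384 + 24576 = 40960 bytes, bounded
 * by sb_max and rounded up to a multiple of t_maxseg.
 */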
1135 void
1136 tcp_update_sndspace(struct tcpcb *tp)
1137 {
1138 	struct socket *so = tp->t_inpcb->inp_socket;
1139 	u_long nmax = so->so_snd.sb_hiwat;
1140 
1141 	if (sbchecklowmem()) {
1142 		/* low on memory, try to get rid of some */
1143 		if (tcp_sendspace < nmax)
1144 			nmax = tcp_sendspace;
1145 	} else if (so->so_snd.sb_wat != tcp_sendspace)
1146 		/* user requested buffer size, auto-scaling disabled */
1147 		nmax = so->so_snd.sb_wat;
1148 	else
1149 		/* automatic buffer scaling */
1150 		nmax = MIN(sb_max, so->so_snd.sb_wat + tp->snd_max -
1151 		    tp->snd_una);
1152 
1153 	/* a writable socket must be preserved because of poll(2) semantics */
1154 	if (sbspace(so, &so->so_snd) >= so->so_snd.sb_lowat) {
1155 		if (nmax < so->so_snd.sb_cc + so->so_snd.sb_lowat)
1156 			nmax = so->so_snd.sb_cc + so->so_snd.sb_lowat;
1157 		/* keep in sync with sbreserve() calculation */
1158 		if (nmax * 8 < so->so_snd.sb_mbcnt + so->so_snd.sb_lowat)
1159 			nmax = (so->so_snd.sb_mbcnt+so->so_snd.sb_lowat+7) / 8;
1160 	}
1161 
1162 	/* round to MSS boundary */
1163 	nmax = roundup(nmax, tp->t_maxseg);
1164 
1165 	if (nmax != so->so_snd.sb_hiwat)
1166 		sbreserve(so, &so->so_snd, nmax);
1167 }
1168 
1169 /*
1170  * Scale the recv buffer by looking at how much data was transferred in
1171  * one approximated RTT.  If a large part of the recv buffer was
1172  * transferred during that time, we increase the buffer by a constant.
1173  * In low-memory situations, try to shrink the buffer to the initial size.
1174  */
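/*
 * Worked example (assuming the default tcp_recvspace of 16k): once more
 * than 7/8 of the buffer, i.e. more than 14336 bytes, has arrived within
 * one approximated RTT, the buffer grows by tcp_autorcvbuf_inc (16k) to
 * 32768 bytes, again bounded by sb_max.
 */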
1175 void
1176 tcp_update_rcvspace(struct tcpcb *tp)
1177 {
1178 	struct socket *so = tp->t_inpcb->inp_socket;
1179 	u_long nmax = so->so_rcv.sb_hiwat;
1180 
1181 	if (sbchecklowmem()) {
1182 		/* low on memory, try to get rid of some */
1183 		if (tcp_recvspace < nmax)
1184 			nmax = tcp_recvspace;
1185 	} else if (so->so_rcv.sb_wat != tcp_recvspace)
1186 		/* user requested buffer size, auto-scaling disabled */
1187 		nmax = so->so_rcv.sb_wat;
1188 	else {
1189 		/* automatic buffer scaling */
1190 		if (tp->rfbuf_cnt > so->so_rcv.sb_hiwat / 8 * 7)
1191 			nmax = MIN(sb_max, so->so_rcv.sb_hiwat +
1192 			    tcp_autorcvbuf_inc);
1193 	}
1194 
1195 	/* a readable socket must be preserved because of poll(2) semantics */
1196 	if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat &&
1197 	    nmax < so->so_snd.sb_lowat)
1198 		nmax = so->so_snd.sb_lowat;
1199 
1200 	if (nmax == so->so_rcv.sb_hiwat)
1201 		return;
1202 
1203 	/* round to MSS boundary */
1204 	nmax = roundup(nmax, tp->t_maxseg);
1205 	sbreserve(so, &so->so_rcv, nmax);
1206 }
1207