1 /* $OpenBSD: tcp_usrreq.c,v 1.240 2025/01/16 11:59:20 bluhm Exp $ */
2 /* $NetBSD: tcp_usrreq.c,v 1.20 1996/02/13 23:44:16 christos Exp $ */
3
4 /*
5 * Copyright (c) 1982, 1986, 1988, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
33 *
34 * NRL grants permission for redistribution and use in source and binary
35 * forms, with or without modification, of the software and documentation
36 * created at NRL provided that the following conditions are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgements:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * This product includes software developed at the Information
48 * Technology Division, US Naval Research Laboratory.
49 * 4. Neither the name of the NRL nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
57 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 * The views and conclusions contained in the software and documentation
66 * are those of the authors and should not be interpreted as representing
67 * official policies, either expressed or implied, of the US Naval
68 * Research Laboratory (NRL).
69 */
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/mbuf.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/protosw.h>
77 #include <sys/stat.h>
78 #include <sys/sysctl.h>
79 #include <sys/domain.h>
80 #include <sys/kernel.h>
81 #include <sys/pool.h>
82 #include <sys/proc.h>
83
84 #include <net/if.h>
85 #include <net/if_var.h>
86 #include <net/route.h>
87
88 #include <netinet/in.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip.h>
91 #include <netinet/in_pcb.h>
92 #include <netinet/ip_var.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet/tcp.h>
95 #include <netinet/tcp_fsm.h>
96 #include <netinet/tcp_seq.h>
97 #include <netinet/tcp_timer.h>
98 #include <netinet/tcp_var.h>
99 #include <netinet/tcp_debug.h>
100
101 #ifdef INET6
102 #include <netinet6/in6_var.h>
103 #endif
104
105 /*
106 * Locks used to protect global variables in this file:
107 * I immutable after creation
108 */
109
110 #ifndef TCP_SENDSPACE
111 #define TCP_SENDSPACE 1024*16
112 #endif
113 u_int tcp_sendspace = TCP_SENDSPACE; /* [I] */
114 #ifndef TCP_RECVSPACE
115 #define TCP_RECVSPACE 1024*16
116 #endif
117 u_int tcp_recvspace = TCP_RECVSPACE; /* [I] */
118 u_int tcp_autorcvbuf_inc = 16 * 1024; /* [I] */
119
120 const struct pr_usrreqs tcp_usrreqs = {
121 .pru_attach = tcp_attach,
122 .pru_detach = tcp_detach,
123 .pru_bind = tcp_bind,
124 .pru_listen = tcp_listen,
125 .pru_connect = tcp_connect,
126 .pru_accept = tcp_accept,
127 .pru_disconnect = tcp_disconnect,
128 .pru_shutdown = tcp_shutdown,
129 .pru_rcvd = tcp_rcvd,
130 .pru_send = tcp_send,
131 .pru_abort = tcp_abort,
132 .pru_sense = tcp_sense,
133 .pru_rcvoob = tcp_rcvoob,
134 .pru_sendoob = tcp_sendoob,
135 .pru_control = in_control,
136 .pru_sockaddr = tcp_sockaddr,
137 .pru_peeraddr = tcp_peeraddr,
138 };
139
140 #ifdef INET6
141 const struct pr_usrreqs tcp6_usrreqs = {
142 .pru_attach = tcp_attach,
143 .pru_detach = tcp_detach,
144 .pru_bind = tcp_bind,
145 .pru_listen = tcp_listen,
146 .pru_connect = tcp_connect,
147 .pru_accept = tcp_accept,
148 .pru_disconnect = tcp_disconnect,
149 .pru_shutdown = tcp_shutdown,
150 .pru_rcvd = tcp_rcvd,
151 .pru_send = tcp_send,
152 .pru_abort = tcp_abort,
153 .pru_sense = tcp_sense,
154 .pru_rcvoob = tcp_rcvoob,
155 .pru_sendoob = tcp_sendoob,
156 .pru_control = in6_control,
157 .pru_sockaddr = tcp_sockaddr,
158 .pru_peeraddr = tcp_peeraddr,
159 };
160 #endif
161
162 const struct sysctl_bounded_args tcpctl_vars[] = {
163 { TCPCTL_KEEPINITTIME, &tcp_keepinit_sec, 1,
164 3 * TCPTV_KEEPINIT / TCP_TIME(1) },
165 { TCPCTL_KEEPIDLE, &tcp_keepidle_sec, 1,
166 5 * TCPTV_KEEPIDLE / TCP_TIME(1) },
167 { TCPCTL_KEEPINTVL, &tcp_keepintvl_sec, 1,
168 3 * TCPTV_KEEPINTVL / TCP_TIME(1) },
169 { TCPCTL_RFC1323, &tcp_do_rfc1323, 0, 1 },
170 { TCPCTL_SACK, &tcp_do_sack, 0, 1 },
171 { TCPCTL_MSSDFLT, &tcp_mssdflt, TCP_MSS, 65535 },
172 { TCPCTL_RSTPPSLIMIT, &tcp_rst_ppslim, 1, 1000 * 1000 },
173 { TCPCTL_ACK_ON_PUSH, &tcp_ack_on_push, 0, 1 },
174 #ifdef TCP_ECN
175 { TCPCTL_ECN, &tcp_do_ecn, 0, 1 },
176 #endif
177 { TCPCTL_SYN_CACHE_LIMIT, &tcp_syn_cache_limit, 1, 1000 * 1000 },
178 { TCPCTL_SYN_BUCKET_LIMIT, &tcp_syn_bucket_limit, 1, INT_MAX },
179 { TCPCTL_RFC3390, &tcp_do_rfc3390, 0, 2 },
180 { TCPCTL_ALWAYS_KEEPALIVE, &tcp_always_keepalive, 0, 1 },
181 { TCPCTL_TSO, &tcp_do_tso, 0, 1 },
182 };
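/*
 * Illustrative sketch (not part of this file): the bounds above apply to
 * the net.inet.tcp.* sysctl variables, which userland can read with
 * sysctl(2), e.g. for net.inet.tcp.sack:
 *
 *	int mib[] = { CTL_NET, PF_INET, IPPROTO_TCP, TCPCTL_SACK };
 *	int sack;
 *	size_t len = sizeof(sack);
 *
 *	if (sysctl(mib, 4, &sack, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */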
183
184 struct inpcbtable tcbtable;
185 #ifdef INET6
186 struct inpcbtable tcb6table;
187 #endif
188
189 int tcp_fill_info(struct tcpcb *, struct socket *, struct mbuf *);
190 int tcp_ident(void *, size_t *, void *, size_t, int);
191
192 static inline int tcp_sogetpcb(struct socket *, struct inpcb **,
193 struct tcpcb **);
194
195 static inline int
196 tcp_sogetpcb(struct socket *so, struct inpcb **rinp, struct tcpcb **rtp)
197 {
198 struct inpcb *inp;
199 struct tcpcb *tp;
200
201 /*
202 * When a TCP is attached to a socket, then there will be
203 * a (struct inpcb) pointed at by the socket, and this
204 * structure will point at a subsidiary (struct tcpcb).
205 */
206 if ((inp = sotoinpcb(so)) == NULL || (tp = intotcpcb(inp)) == NULL) {
207 int error;
208
209 if ((error = READ_ONCE(so->so_error)))
210 return error;
211 return EINVAL;
212 }
213
214 *rinp = inp;
215 *rtp = tp;
216
217 return 0;
218 }
219
220 /*
221 * Export internal TCP state information via a struct tcp_info without
222 * leaking any sensitive information. Sequence numbers are reported
223 * relative to the initial sequence number.
224 */
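/*
 * Illustrative sketch (not part of this file): userland reaches this
 * through the TCP_INFO socket option handled in tcp_ctloutput(), with s
 * being a connected TCP socket, roughly:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt: %u us\n", ti.tcpi_rtt);
 */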
225 int
226 tcp_fill_info(struct tcpcb *tp, struct socket *so, struct mbuf *m)
227 {
228 struct proc *p = curproc;
229 struct tcp_info *ti;
230 u_int t = 1000; /* msec => usec */
231 uint64_t now;
232
233 if (sizeof(*ti) > MLEN) {
234 MCLGETL(m, M_WAITOK, sizeof(*ti));
235 if (!ISSET(m->m_flags, M_EXT))
236 return ENOMEM;
237 }
238 ti = mtod(m, struct tcp_info *);
239 m->m_len = sizeof(*ti);
240 memset(ti, 0, sizeof(*ti));
241 now = tcp_now();
242
243 ti->tcpi_state = tp->t_state;
244 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
245 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
246 if (tp->t_flags & TF_SACK_PERMIT)
247 ti->tcpi_options |= TCPI_OPT_SACK;
248 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
249 ti->tcpi_options |= TCPI_OPT_WSCALE;
250 ti->tcpi_snd_wscale = tp->snd_scale;
251 ti->tcpi_rcv_wscale = tp->rcv_scale;
252 }
253 #ifdef TCP_ECN
254 if (tp->t_flags & TF_ECN_PERMIT)
255 ti->tcpi_options |= TCPI_OPT_ECN;
256 #endif
257
258 ti->tcpi_rto = tp->t_rxtcur * t;
259 ti->tcpi_snd_mss = tp->t_maxseg;
260 ti->tcpi_rcv_mss = tp->t_peermss;
261
262 ti->tcpi_last_data_sent = (now - tp->t_sndtime) * t;
263 ti->tcpi_last_ack_sent = (now - tp->t_sndacktime) * t;
264 ti->tcpi_last_data_recv = (now - tp->t_rcvtime) * t;
265 ti->tcpi_last_ack_recv = (now - tp->t_rcvacktime) * t;
266
267 ti->tcpi_rtt = ((uint64_t)tp->t_srtt * t) >>
268 (TCP_RTT_SHIFT + TCP_RTT_BASE_SHIFT);
269 ti->tcpi_rttvar = ((uint64_t)tp->t_rttvar * t) >>
270 (TCP_RTTVAR_SHIFT + TCP_RTT_BASE_SHIFT);
271 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
272 ti->tcpi_snd_cwnd = tp->snd_cwnd;
273
274 ti->tcpi_rcv_space = tp->rcv_wnd;
275
276 /*
277 * Provide only minimal information for unprivileged processes.
278 */
279 if (suser(p) != 0)
280 return 0;
281
282 /* FreeBSD-specific extension fields for tcp_info. */
283 ti->tcpi_snd_wnd = tp->snd_wnd;
284 ti->tcpi_snd_nxt = tp->snd_nxt - tp->iss;
285 ti->tcpi_rcv_nxt = tp->rcv_nxt - tp->irs;
286 /* missing tcpi_toe_tid */
287 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
288 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
289 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
290
291 /* OpenBSD extensions */
292 ti->tcpi_rttmin = tp->t_rttmin * t;
293 ti->tcpi_max_sndwnd = tp->max_sndwnd;
294 ti->tcpi_rcv_adv = tp->rcv_adv - tp->irs;
295 ti->tcpi_rcv_up = tp->rcv_up - tp->irs;
296 ti->tcpi_snd_una = tp->snd_una - tp->iss;
297 ti->tcpi_snd_up = tp->snd_up - tp->iss;
298 ti->tcpi_snd_wl1 = tp->snd_wl1 - tp->iss;
299 ti->tcpi_snd_wl2 = tp->snd_wl2 - tp->iss;
300 ti->tcpi_snd_max = tp->snd_max - tp->iss;
301
302 ti->tcpi_ts_recent = tp->ts_recent; /* XXX value from the wire */
303 ti->tcpi_ts_recent_age = (now - tp->ts_recent_age) * t;
304 ti->tcpi_rfbuf_cnt = tp->rfbuf_cnt;
305 ti->tcpi_rfbuf_ts = (now - tp->rfbuf_ts) * t;
306
307 mtx_enter(&so->so_rcv.sb_mtx);
308 ti->tcpi_so_rcv_sb_cc = so->so_rcv.sb_cc;
309 ti->tcpi_so_rcv_sb_hiwat = so->so_rcv.sb_hiwat;
310 ti->tcpi_so_rcv_sb_lowat = so->so_rcv.sb_lowat;
311 ti->tcpi_so_rcv_sb_wat = so->so_rcv.sb_wat;
312 mtx_leave(&so->so_rcv.sb_mtx);
313 mtx_enter(&so->so_snd.sb_mtx);
314 ti->tcpi_so_snd_sb_cc = so->so_snd.sb_cc;
315 ti->tcpi_so_snd_sb_hiwat = so->so_snd.sb_hiwat;
316 ti->tcpi_so_snd_sb_lowat = so->so_snd.sb_lowat;
317 ti->tcpi_so_snd_sb_wat = so->so_snd.sb_wat;
318 mtx_leave(&so->so_snd.sb_mtx);
319
320 return 0;
321 }
322
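/*
 * Handle TCP-level (IPPROTO_TCP) socket options; options for other
 * levels are passed on to ip_ctloutput() or ip6_ctloutput().  Reached
 * from userland via setsockopt(2)/getsockopt(2), e.g. (sketch):
 *
 *	int on = 1;
 *
 *	setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 */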
323 int
324 tcp_ctloutput(int op, struct socket *so, int level, int optname,
325 struct mbuf *m)
326 {
327 int error = 0;
328 struct inpcb *inp;
329 struct tcpcb *tp;
330 int i;
331
332 inp = sotoinpcb(so);
333 if (inp == NULL)
334 return (ECONNRESET);
335 if (level != IPPROTO_TCP) {
336 #ifdef INET6
337 if (ISSET(inp->inp_flags, INP_IPV6))
338 error = ip6_ctloutput(op, so, level, optname, m);
339 else
340 #endif
341 error = ip_ctloutput(op, so, level, optname, m);
342 return (error);
343 }
344 tp = intotcpcb(inp);
345
346 switch (op) {
347
348 case PRCO_SETOPT:
349 switch (optname) {
350
351 case TCP_NODELAY:
352 if (m == NULL || m->m_len < sizeof (int))
353 error = EINVAL;
354 else if (*mtod(m, int *))
355 tp->t_flags |= TF_NODELAY;
356 else
357 tp->t_flags &= ~TF_NODELAY;
358 break;
359
360 case TCP_NOPUSH:
361 if (m == NULL || m->m_len < sizeof (int))
362 error = EINVAL;
363 else if (*mtod(m, int *))
364 tp->t_flags |= TF_NOPUSH;
365 else if (tp->t_flags & TF_NOPUSH) {
366 tp->t_flags &= ~TF_NOPUSH;
367 if (TCPS_HAVEESTABLISHED(tp->t_state))
368 error = tcp_output(tp);
369 }
370 break;
371
372 case TCP_MAXSEG:
373 if (m == NULL || m->m_len < sizeof (int)) {
374 error = EINVAL;
375 break;
376 }
377
378 i = *mtod(m, int *);
379 if (i > 0 && i <= tp->t_maxseg)
380 tp->t_maxseg = i;
381 else
382 error = EINVAL;
383 break;
384
385 case TCP_SACK_ENABLE:
386 if (m == NULL || m->m_len < sizeof (int)) {
387 error = EINVAL;
388 break;
389 }
390
391 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
392 error = EPERM;
393 break;
394 }
395
396 if (tp->t_flags & TF_SIGNATURE) {
397 error = EPERM;
398 break;
399 }
400
401 if (*mtod(m, int *))
402 tp->sack_enable = 1;
403 else
404 tp->sack_enable = 0;
405 break;
406 #ifdef TCP_SIGNATURE
407 case TCP_MD5SIG:
408 if (m == NULL || m->m_len < sizeof (int)) {
409 error = EINVAL;
410 break;
411 }
412
413 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
414 error = EPERM;
415 break;
416 }
417
418 if (*mtod(m, int *)) {
419 tp->t_flags |= TF_SIGNATURE;
420 tp->sack_enable = 0;
421 } else
422 tp->t_flags &= ~TF_SIGNATURE;
423 break;
424 #endif /* TCP_SIGNATURE */
425 default:
426 error = ENOPROTOOPT;
427 break;
428 }
429 break;
430
431 case PRCO_GETOPT:
432 switch (optname) {
433 case TCP_NODELAY:
434 m->m_len = sizeof(int);
435 *mtod(m, int *) = tp->t_flags & TF_NODELAY;
436 break;
437 case TCP_NOPUSH:
438 m->m_len = sizeof(int);
439 *mtod(m, int *) = tp->t_flags & TF_NOPUSH;
440 break;
441 case TCP_MAXSEG:
442 m->m_len = sizeof(int);
443 *mtod(m, int *) = tp->t_maxseg;
444 break;
445 case TCP_SACK_ENABLE:
446 m->m_len = sizeof(int);
447 *mtod(m, int *) = tp->sack_enable;
448 break;
449 case TCP_INFO:
450 error = tcp_fill_info(tp, so, m);
451 break;
452 #ifdef TCP_SIGNATURE
453 case TCP_MD5SIG:
454 m->m_len = sizeof(int);
455 *mtod(m, int *) = tp->t_flags & TF_SIGNATURE;
456 break;
457 #endif
458 default:
459 error = ENOPROTOOPT;
460 break;
461 }
462 break;
463 }
464 return (error);
465 }
466
467 /*
468  * Attach TCP protocol to socket, allocating
469  * internet protocol control block, tcp control block,
470  * and buffer space.  The connection starts out in the CLOSED state.
471 */
472 int
473 tcp_attach(struct socket *so, int proto, int wait)
474 {
475 struct inpcbtable *table;
476 struct tcpcb *tp;
477 struct inpcb *inp;
478 int error;
479
480 if (so->so_pcb)
481 return EISCONN;
482 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0 ||
483 sbcheckreserve(so->so_snd.sb_wat, tcp_sendspace) ||
484 sbcheckreserve(so->so_rcv.sb_wat, tcp_recvspace)) {
485 error = soreserve(so, tcp_sendspace, tcp_recvspace);
486 if (error)
487 return (error);
488 }
489
490 NET_ASSERT_LOCKED();
491 #ifdef INET6
492 if (so->so_proto->pr_domain->dom_family == PF_INET6)
493 table = &tcb6table;
494 else
495 #endif
496 table = &tcbtable;
497 error = in_pcballoc(so, table, wait);
498 if (error)
499 return (error);
500 inp = sotoinpcb(so);
501 tp = tcp_newtcpcb(inp, wait);
502 if (tp == NULL) {
503 unsigned int nofd = so->so_state & SS_NOFDREF; /* XXX */
504
505 so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */
506 in_pcbdetach(inp);
507 so->so_state |= nofd;
508 return (ENOBUFS);
509 }
510 tp->t_state = TCPS_CLOSED;
511 #ifdef INET6
512 if (ISSET(inp->inp_flags, INP_IPV6))
513 tp->pf = PF_INET6;
514 else
515 #endif
516 tp->pf = PF_INET;
517 if ((so->so_options & SO_LINGER) && so->so_linger == 0)
518 so->so_linger = TCP_LINGERTIME;
519
520 if (so->so_options & SO_DEBUG)
521 tcp_trace(TA_USER, TCPS_CLOSED, tp, tp, NULL, PRU_ATTACH, 0);
522 return (0);
523 }
524
525 int
526 tcp_detach(struct socket *so)
527 {
528 struct inpcb *inp;
529 struct tcpcb *otp = NULL, *tp;
530 int error;
531 short ostate;
532
533 soassertlocked(so);
534
535 if ((error = tcp_sogetpcb(so, &inp, &tp)))
536 return (error);
537
538 if (so->so_options & SO_DEBUG) {
539 otp = tp;
540 ostate = tp->t_state;
541 }
542
543 /*
544 * Detach the TCP protocol from the socket.
545 * If the protocol state is non-embryonic, then can't
546 * do this directly: have to initiate a PRU_DISCONNECT,
547 * which may finish later; embryonic TCB's can just
548 * be discarded here.
549 */
550 tp = tcp_dodisconnect(tp);
551
552 if (otp)
553 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_DETACH, 0);
554 return (0);
555 }
556
557 /*
558 * Give the socket an address.
559 */
560 int
561 tcp_bind(struct socket *so, struct mbuf *nam, struct proc *p)
562 {
563 struct inpcb *inp;
564 struct tcpcb *tp;
565 int error;
566 short ostate;
567
568 soassertlocked(so);
569
570 if ((error = tcp_sogetpcb(so, &inp, &tp)))
571 return (error);
572
573 if (so->so_options & SO_DEBUG)
574 ostate = tp->t_state;
575
576 error = in_pcbbind(inp, nam, p);
577
578 if (so->so_options & SO_DEBUG)
579 tcp_trace(TA_USER, ostate, tp, tp, NULL, PRU_BIND, 0);
580 return (error);
581 }
582
583 /*
584 * Prepare to accept connections.
585 */
586 int
587 tcp_listen(struct socket *so)
588 {
589 struct inpcb *inp;
590 struct tcpcb *tp, *otp = NULL;
591 int error;
592 short ostate;
593
594 soassertlocked(so);
595
596 if ((error = tcp_sogetpcb(so, &inp, &tp)))
597 return (error);
598
599 if (so->so_options & SO_DEBUG) {
600 otp = tp;
601 ostate = tp->t_state;
602 }
603
604 if (inp->inp_lport == 0)
605 if ((error = in_pcbbind(inp, NULL, curproc)))
606 goto out;
607
608 /*
609 * If the in_pcbbind() above is called, the tp->pf
610 * should still be whatever it was before.
611 */
612 tp->t_state = TCPS_LISTEN;
613
614 out:
615 if (otp)
616 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_LISTEN, 0);
617 return (error);
618 }
619
620 /*
621 * Initiate connection to peer.
622 * Create a template for use in transmissions on this connection.
623 * Enter SYN_SENT state, and mark socket as connecting.
624 * Start keep-alive timer, and seed output sequence space.
625 * Send initial segment on connection.
626 */
627 int
628 tcp_connect(struct socket *so, struct mbuf *nam)
629 {
630 struct inpcb *inp;
631 struct tcpcb *tp, *otp = NULL;
632 int error;
633 short ostate;
634
635 soassertlocked(so);
636
637 if ((error = tcp_sogetpcb(so, &inp, &tp)))
638 return (error);
639
640 if (so->so_options & SO_DEBUG) {
641 otp = tp;
642 ostate = tp->t_state;
643 }
644
645 #ifdef INET6
646 if (ISSET(inp->inp_flags, INP_IPV6)) {
647 struct sockaddr_in6 *sin6;
648
649 if ((error = in6_nam2sin6(nam, &sin6)))
650 goto out;
651 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
652 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
653 error = EINVAL;
654 goto out;
655 }
656 } else
657 #endif
658 {
659 struct sockaddr_in *sin;
660
661 if ((error = in_nam2sin(nam, &sin)))
662 goto out;
663 if ((sin->sin_addr.s_addr == INADDR_ANY) ||
664 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
665 IN_MULTICAST(sin->sin_addr.s_addr) ||
666 in_broadcast(sin->sin_addr, inp->inp_rtableid)) {
667 error = EINVAL;
668 goto out;
669 }
670 }
671 error = in_pcbconnect(inp, nam);
672 if (error)
673 goto out;
674
675 tp->t_template = tcp_template(tp);
676 if (tp->t_template == 0) {
677 in_pcbunset_faddr(inp);
678 in_pcbdisconnect(inp);
679 error = ENOBUFS;
680 goto out;
681 }
682
683 so->so_state |= SS_CONNECTOUT;
684
685 /* Compute window scaling to request. */
686 tcp_rscale(tp, sb_max);
687
688 soisconnecting(so);
689 tcpstat_inc(tcps_connattempt);
690 tp->t_state = TCPS_SYN_SENT;
691 TCP_TIMER_ARM(tp, TCPT_KEEP, atomic_load_int(&tcp_keepinit));
692 tcp_set_iss_tsm(tp);
693 tcp_sendseqinit(tp);
694 tp->snd_last = tp->snd_una;
695 error = tcp_output(tp);
696
697 out:
698 if (otp)
699 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_CONNECT, 0);
700 return (error);
701 }
702
703 /*
704 * Accept a connection. Essentially all the work is done at higher
705 * levels; just return the address of the peer, storing through addr.
706 */
707 int
708 tcp_accept(struct socket *so, struct mbuf *nam)
709 {
710 struct inpcb *inp;
711 struct tcpcb *tp;
712 int error;
713
714 soassertlocked(so);
715
716 if ((error = tcp_sogetpcb(so, &inp, &tp)))
717 return (error);
718
719 in_setpeeraddr(inp, nam);
720
721 if (so->so_options & SO_DEBUG)
722 tcp_trace(TA_USER, tp->t_state, tp, tp, NULL, PRU_ACCEPT, 0);
723 return (0);
724 }
725
726 /*
727 * Initiate disconnect from peer.
728 * If connection never passed embryonic stage, just drop;
729  * else if don't need to let data drain, then can just drop anyway,
730 * else have to begin TCP shutdown process: mark socket disconnecting,
731 * drain unread data, state switch to reflect user close, and
732 * send segment (e.g. FIN) to peer. Socket will be really disconnected
733 * when peer sends FIN and acks ours.
734 *
735 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
736 */
737 int
738 tcp_disconnect(struct socket *so)
739 {
740 struct inpcb *inp;
741 struct tcpcb *tp, *otp = NULL;
742 int error;
743 short ostate;
744
745 soassertlocked(so);
746
747 if ((error = tcp_sogetpcb(so, &inp, &tp)))
748 return (error);
749
750 if (so->so_options & SO_DEBUG) {
751 otp = tp;
752 ostate = tp->t_state;
753 }
754
755 tp = tcp_dodisconnect(tp);
756
757 if (otp)
758 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_DISCONNECT, 0);
759 return (0);
760 }
761
762 /*
763 * Mark the connection as being incapable of further output.
764 */
765 int
766 tcp_shutdown(struct socket *so)
767 {
768 struct inpcb *inp;
769 struct tcpcb *tp, *otp = NULL;
770 int error;
771 short ostate;
772
773 soassertlocked(so);
774
775 if ((error = tcp_sogetpcb(so, &inp, &tp)))
776 return (error);
777
778 if (so->so_options & SO_DEBUG) {
779 otp = tp;
780 ostate = tp->t_state;
781 }
782
783 if (so->so_snd.sb_state & SS_CANTSENDMORE)
784 goto out;
785
786 socantsendmore(so);
787 tp = tcp_usrclosed(tp);
788 if (tp)
789 error = tcp_output(tp);
790
791 out:
792 if (otp)
793 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_SHUTDOWN, 0);
794 return (error);
795 }
796
797 /*
798 * After a receive, possibly send window update to peer.
799 */
800 void
801 tcp_rcvd(struct socket *so)
802 {
803 struct inpcb *inp;
804 struct tcpcb *tp;
805 short ostate;
806
807 soassertlocked(so);
808
809 if (tcp_sogetpcb(so, &inp, &tp))
810 return;
811
812 if (so->so_options & SO_DEBUG)
813 ostate = tp->t_state;
814
815 /*
816 * soreceive() calls this function when a user receives
817 * ancillary data on a listening socket. We don't call
818  * tcp_output() in such a case, since a listening socket has no
819  * header template and calling it would panic the kernel.
821 */
822 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) != 0)
823 (void) tcp_output(tp);
824
825 if (so->so_options & SO_DEBUG)
826 tcp_trace(TA_USER, ostate, tp, tp, NULL, PRU_RCVD, 0);
827 }
828
829 /*
830 * Do a send by putting data in output queue and updating urgent
831 * marker if URG set. Possibly send more data.
832 */
833 int
834 tcp_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
835 struct mbuf *control)
836 {
837 struct inpcb *inp;
838 struct tcpcb *tp;
839 int error;
840 short ostate;
841
842 soassertlocked(so);
843
844 if (control && control->m_len) {
845 error = EINVAL;
846 goto out;
847 }
848
849 if ((error = tcp_sogetpcb(so, &inp, &tp)))
850 goto out;
851
852 if (so->so_options & SO_DEBUG)
853 ostate = tp->t_state;
854
855 mtx_enter(&so->so_snd.sb_mtx);
856 sbappendstream(so, &so->so_snd, m);
857 mtx_leave(&so->so_snd.sb_mtx);
858 m = NULL;
859
860 error = tcp_output(tp);
861
862 if (so->so_options & SO_DEBUG)
863 tcp_trace(TA_USER, ostate, tp, tp, NULL, PRU_SEND, 0);
864
865 out:
866 m_freem(control);
867 m_freem(m);
868
869 return (error);
870 }
871
872 /*
873 * Abort the TCP.
874 */
875 void
876 tcp_abort(struct socket *so)
877 {
878 struct inpcb *inp;
879 struct tcpcb *tp, *otp = NULL;
880 short ostate;
881
882 soassertlocked(so);
883
884 if (tcp_sogetpcb(so, &inp, &tp))
885 return;
886
887 if (so->so_options & SO_DEBUG) {
888 otp = tp;
889 ostate = tp->t_state;
890 }
891
892 tp = tcp_drop(tp, ECONNABORTED);
893
894 if (otp)
895 tcp_trace(TA_USER, ostate, tp, otp, NULL, PRU_ABORT, 0);
896 }
897
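/*
 * fstat(2) on the socket: report the send buffer high-water mark as
 * the preferred block size.
 */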
898 int
899 tcp_sense(struct socket *so, struct stat *ub)
900 {
901 struct inpcb *inp;
902 struct tcpcb *tp;
903 int error;
904
905 soassertlocked(so);
906
907 if ((error = tcp_sogetpcb(so, &inp, &tp)))
908 return (error);
909
910 mtx_enter(&so->so_snd.sb_mtx);
911 ub->st_blksize = so->so_snd.sb_hiwat;
912 mtx_leave(&so->so_snd.sb_mtx);
913
914 if (so->so_options & SO_DEBUG)
915 tcp_trace(TA_USER, tp->t_state, tp, tp, NULL, PRU_SENSE, 0);
916 return (0);
917 }
918
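/*
 * Return the single byte of out-of-band data if it has arrived and
 * has not yet been consumed or merged into the normal data stream.
 */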
919 int
920 tcp_rcvoob(struct socket *so, struct mbuf *m, int flags)
921 {
922 struct inpcb *inp;
923 struct tcpcb *tp;
924 int error;
925
926 soassertlocked(so);
927
928 if ((error = tcp_sogetpcb(so, &inp, &tp)))
929 return (error);
930
931 if ((so->so_oobmark == 0 &&
932 (so->so_rcv.sb_state & SS_RCVATMARK) == 0) ||
933 so->so_options & SO_OOBINLINE ||
934 tp->t_oobflags & TCPOOB_HADDATA) {
935 error = EINVAL;
936 goto out;
937 }
938 if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
939 error = EWOULDBLOCK;
940 goto out;
941 }
942 m->m_len = 1;
943 *mtod(m, caddr_t) = tp->t_iobc;
944 if ((flags & MSG_PEEK) == 0)
945 tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
946 out:
947 if (so->so_options & SO_DEBUG)
948 tcp_trace(TA_USER, tp->t_state, tp, tp, NULL, PRU_RCVOOB, 0);
949 return (error);
950 }
951
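/*
 * Send out-of-band data: append it to the send buffer, point the
 * urgent pointer past it and force a segment out.
 */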
952 int
953 tcp_sendoob(struct socket *so, struct mbuf *m, struct mbuf *nam,
954 struct mbuf *control)
955 {
956 struct inpcb *inp;
957 struct tcpcb *tp;
958 int error;
959 short ostate;
960
961 soassertlocked(so);
962
963 if (control && control->m_len) {
964 error = EINVAL;
965 goto release;
966 }
967
968 if ((error = tcp_sogetpcb(so, &inp, &tp)))
969 goto release;
970
971 if (so->so_options & SO_DEBUG)
972 ostate = tp->t_state;
973
974 if (sbspace(so, &so->so_snd) < -512) {
975 error = ENOBUFS;
976 goto out;
977 }
978
979 /*
980 * According to RFC961 (Assigned Protocols),
981 * the urgent pointer points to the last octet
982 * of urgent data. We continue, however,
983 * to consider it to indicate the first octet
984 * of data past the urgent section.
985 * Otherwise, snd_up should be one lower.
986 */
987 mtx_enter(&so->so_snd.sb_mtx);
988 sbappendstream(so, &so->so_snd, m);
989 mtx_leave(&so->so_snd.sb_mtx);
990 m = NULL;
991 tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
992 tp->t_force = 1;
993 error = tcp_output(tp);
994 tp->t_force = 0;
995
996 out:
997 if (so->so_options & SO_DEBUG)
998 tcp_trace(TA_USER, ostate, tp, tp, NULL, PRU_SENDOOB, 0);
999
1000 release:
1001 m_freem(control);
1002 m_freem(m);
1003
1004 return (error);
1005 }
1006
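/*
 * Return the bound local address and port of the socket.
 */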
1007 int
1008 tcp_sockaddr(struct socket *so, struct mbuf *nam)
1009 {
1010 struct inpcb *inp;
1011 struct tcpcb *tp;
1012 int error;
1013
1014 soassertlocked(so);
1015
1016 if ((error = tcp_sogetpcb(so, &inp, &tp)))
1017 return (error);
1018
1019 in_setsockaddr(inp, nam);
1020
1021 if (so->so_options & SO_DEBUG)
1022 tcp_trace(TA_USER, tp->t_state, tp, tp, NULL,
1023 PRU_SOCKADDR, 0);
1024 return (0);
1025 }
1026
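/*
 * Return the address and port of the connected peer.
 */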
1027 int
1028 tcp_peeraddr(struct socket *so, struct mbuf *nam)
1029 {
1030 struct inpcb *inp;
1031 struct tcpcb *tp;
1032 int error;
1033
1034 soassertlocked(so);
1035
1036 if ((error = tcp_sogetpcb(so, &inp, &tp)))
1037 return (error);
1038
1039 in_setpeeraddr(inp, nam);
1040
1041 if (so->so_options & SO_DEBUG)
1042 tcp_trace(TA_USER, tp->t_state, tp, tp, NULL, PRU_PEERADDR, 0);
1043 return (0);
1044 }
1045
1046 /*
1047 * Initiate (or continue) disconnect.
1048 * If embryonic state, just send reset (once).
1049 * If in ``let data drain'' option and linger null, just drop.
1050 * Otherwise (hard), mark socket disconnecting and drop
1051 * current input data; switch states based on user close, and
1052 * send segment to peer (with FIN).
1053 */
1054 struct tcpcb *
1055 tcp_dodisconnect(struct tcpcb *tp)
1056 {
1057 struct socket *so = tp->t_inpcb->inp_socket;
1058
1059 if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
1060 tp = tcp_close(tp);
1061 else if ((so->so_options & SO_LINGER) && so->so_linger == 0)
1062 tp = tcp_drop(tp, 0);
1063 else {
1064 soisdisconnecting(so);
1065 mtx_enter(&so->so_rcv.sb_mtx);
1066 sbflush(so, &so->so_rcv);
1067 mtx_leave(&so->so_rcv.sb_mtx);
1068 tp = tcp_usrclosed(tp);
1069 if (tp)
1070 (void) tcp_output(tp);
1071 }
1072 return (tp);
1073 }
1074
1075 /*
1076 * User issued close, and wish to trail through shutdown states:
1077 * if never received SYN, just forget it. If got a SYN from peer,
1078 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
1079 * If already got a FIN from peer, then almost done; go to LAST_ACK
1080 * state. In all other cases, have already sent FIN to peer (e.g.
1081 * after PRU_SHUTDOWN), and just have to play tedious game waiting
1082 * for peer to send FIN or not respond to keep-alives, etc.
1083 * We can let the user exit from the close as soon as the FIN is acked.
1084 */
1085 struct tcpcb *
1086 tcp_usrclosed(struct tcpcb *tp)
1087 {
1088
1089 switch (tp->t_state) {
1090
1091 case TCPS_CLOSED:
1092 case TCPS_LISTEN:
1093 case TCPS_SYN_SENT:
1094 tp->t_state = TCPS_CLOSED;
1095 tp = tcp_close(tp);
1096 break;
1097
1098 case TCPS_SYN_RECEIVED:
1099 case TCPS_ESTABLISHED:
1100 tp->t_state = TCPS_FIN_WAIT_1;
1101 break;
1102
1103 case TCPS_CLOSE_WAIT:
1104 tp->t_state = TCPS_LAST_ACK;
1105 break;
1106 }
1107 if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
1108 soisdisconnected(tp->t_inpcb->inp_socket);
1109 /*
1110 * If we are in FIN_WAIT_2, we arrived here because the
1111 * application did a shutdown of the send side. Like the
1112 * case of a transition from FIN_WAIT_1 to FIN_WAIT_2 after
1113 * a full close, we start a timer to make sure sockets are
1114 * not left in FIN_WAIT_2 forever.
1115 */
1116 if (tp->t_state == TCPS_FIN_WAIT_2) {
1117 int maxidle;
1118
1119 maxidle = TCPTV_KEEPCNT *
1120 atomic_load_int(&tcp_keepidle);
1121 TCP_TIMER_ARM(tp, TCPT_2MSL, maxidle);
1122 }
1123 }
1124 return (tp);
1125 }
1126
1127 /*
1128 * Look up a socket for ident or tcpdrop, ...
1129 */
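/*
 * Illustrative sketch (not part of this file): a tcpdrop(8)-style tool
 * fills in a tcp_ident_mapping and writes it via sysctl(2), roughly:
 *
 *	struct tcp_ident_mapping tir;
 *	int mib[] = { CTL_NET, PF_INET, IPPROTO_TCP, TCPCTL_DROP };
 *
 *	memset(&tir, 0, sizeof(tir));
 *	... fill tir.faddr, tir.laddr (and tir.rdomain) with the
 *	    connection's endpoints ...
 *	if (sysctl(mib, 4, NULL, NULL, &tir, sizeof(tir)) == -1)
 *		err(1, "sysctl");
 */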
1130 int
1131 tcp_ident(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int dodrop)
1132 {
1133 int error = 0;
1134 struct tcp_ident_mapping tir;
1135 struct inpcb *inp;
1136 struct socket *so = NULL;
1137 struct sockaddr_in *fin, *lin;
1138 #ifdef INET6
1139 struct sockaddr_in6 *fin6, *lin6;
1140 struct in6_addr f6, l6;
1141 #endif
1142
1143 if (dodrop) {
1144 if (oldp != NULL || *oldlenp != 0)
1145 return (EINVAL);
1146 if (newp == NULL)
1147 return (EPERM);
1148 if (newlen < sizeof(tir))
1149 return (ENOMEM);
1150 if ((error = copyin(newp, &tir, sizeof (tir))) != 0 )
1151 return (error);
1152 } else {
1153 if (oldp == NULL)
1154 return (EINVAL);
1155 if (*oldlenp < sizeof(tir))
1156 return (ENOMEM);
1157 if (newp != NULL || newlen != 0)
1158 return (EINVAL);
1159 if ((error = copyin(oldp, &tir, sizeof (tir))) != 0 )
1160 return (error);
1161 }
1162
1163 NET_LOCK_SHARED();
1164
1165 switch (tir.faddr.ss_family) {
1166 #ifdef INET6
1167 case AF_INET6:
1168 if (tir.laddr.ss_family != AF_INET6) {
1169 NET_UNLOCK_SHARED();
1170 return (EAFNOSUPPORT);
1171 }
1172 fin6 = (struct sockaddr_in6 *)&tir.faddr;
1173 error = in6_embedscope(&f6, fin6, NULL, NULL);
1174 if (error) {
1175 NET_UNLOCK_SHARED();
1176 return EINVAL; /*?*/
1177 }
1178 lin6 = (struct sockaddr_in6 *)&tir.laddr;
1179 error = in6_embedscope(&l6, lin6, NULL, NULL);
1180 if (error) {
1181 NET_UNLOCK_SHARED();
1182 return EINVAL; /*?*/
1183 }
1184 break;
1185 #endif
1186 case AF_INET:
1187 if (tir.laddr.ss_family != AF_INET) {
1188 NET_UNLOCK_SHARED();
1189 return (EAFNOSUPPORT);
1190 }
1191 fin = (struct sockaddr_in *)&tir.faddr;
1192 lin = (struct sockaddr_in *)&tir.laddr;
1193 break;
1194 default:
1195 NET_UNLOCK_SHARED();
1196 return (EAFNOSUPPORT);
1197 }
1198
1199 switch (tir.faddr.ss_family) {
1200 #ifdef INET6
1201 case AF_INET6:
1202 inp = in6_pcblookup(&tcb6table, &f6,
1203 fin6->sin6_port, &l6, lin6->sin6_port, tir.rdomain);
1204 break;
1205 #endif
1206 case AF_INET:
1207 inp = in_pcblookup(&tcbtable, fin->sin_addr,
1208 fin->sin_port, lin->sin_addr, lin->sin_port, tir.rdomain);
1209 break;
1210 default:
1211 unhandled_af(tir.faddr.ss_family);
1212 }
1213
1214 if (dodrop) {
1215 struct tcpcb *tp = NULL;
1216
1217 if (inp != NULL) {
1218 so = in_pcbsolock_ref(inp);
1219 if (so != NULL)
1220 tp = intotcpcb(inp);
1221 }
1222 if (tp != NULL && !ISSET(so->so_options, SO_ACCEPTCONN))
1223 tp = tcp_drop(tp, ECONNABORTED);
1224 else
1225 error = ESRCH;
1226
1227 in_pcbsounlock_rele(inp, so);
1228 NET_UNLOCK_SHARED();
1229 in_pcbunref(inp);
1230 return (error);
1231 }
1232
1233 if (inp == NULL) {
1234 tcpstat_inc(tcps_pcbhashmiss);
1235 switch (tir.faddr.ss_family) {
1236 #ifdef INET6
1237 case AF_INET6:
1238 inp = in6_pcblookup_listen(&tcb6table,
1239 &l6, lin6->sin6_port, NULL, tir.rdomain);
1240 break;
1241 #endif
1242 case AF_INET:
1243 inp = in_pcblookup_listen(&tcbtable,
1244 lin->sin_addr, lin->sin_port, NULL, tir.rdomain);
1245 break;
1246 }
1247 }
1248
1249 if (inp != NULL)
1250 so = in_pcbsolock_ref(inp);
1251
1252 if (so != NULL && ISSET(so->so_state, SS_CONNECTOUT)) {
1253 tir.ruid = so->so_ruid;
1254 tir.euid = so->so_euid;
1255 } else {
1256 tir.ruid = -1;
1257 tir.euid = -1;
1258 }
1259
1260 in_pcbsounlock_rele(inp, so);
1261 NET_UNLOCK_SHARED();
1262 in_pcbunref(inp);
1263
1264 *oldlenp = sizeof(tir);
1265 return copyout(&tir, oldp, sizeof(tir));
1266 }
1267
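/*
 * Export TCP statistics: copy the per-CPU counters into a struct
 * tcpstat, fill in the current SYN cache numbers and return the
 * result read-only.
 */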
1268 int
1269 tcp_sysctl_tcpstat(void *oldp, size_t *oldlenp, void *newp)
1270 {
1271 uint64_t counters[tcps_ncounters];
1272 struct tcpstat tcpstat;
1273 struct syn_cache_set *set;
1274 int i = 0;
1275
1276 #define ASSIGN(field) do { tcpstat.field = counters[i++]; } while (0)
1277
1278 memset(&tcpstat, 0, sizeof tcpstat);
1279 counters_read(tcpcounters, counters, nitems(counters), NULL);
1280 ASSIGN(tcps_connattempt);
1281 ASSIGN(tcps_accepts);
1282 ASSIGN(tcps_connects);
1283 ASSIGN(tcps_drops);
1284 ASSIGN(tcps_conndrops);
1285 ASSIGN(tcps_closed);
1286 ASSIGN(tcps_segstimed);
1287 ASSIGN(tcps_rttupdated);
1288 ASSIGN(tcps_delack);
1289 ASSIGN(tcps_timeoutdrop);
1290 ASSIGN(tcps_rexmttimeo);
1291 ASSIGN(tcps_persisttimeo);
1292 ASSIGN(tcps_persistdrop);
1293 ASSIGN(tcps_keeptimeo);
1294 ASSIGN(tcps_keepprobe);
1295 ASSIGN(tcps_keepdrops);
1296 ASSIGN(tcps_sndtotal);
1297 ASSIGN(tcps_sndpack);
1298 ASSIGN(tcps_sndbyte);
1299 ASSIGN(tcps_sndrexmitpack);
1300 ASSIGN(tcps_sndrexmitbyte);
1301 ASSIGN(tcps_sndrexmitfast);
1302 ASSIGN(tcps_sndacks);
1303 ASSIGN(tcps_sndprobe);
1304 ASSIGN(tcps_sndurg);
1305 ASSIGN(tcps_sndwinup);
1306 ASSIGN(tcps_sndctrl);
1307 ASSIGN(tcps_rcvtotal);
1308 ASSIGN(tcps_rcvpack);
1309 ASSIGN(tcps_rcvbyte);
1310 ASSIGN(tcps_rcvbadsum);
1311 ASSIGN(tcps_rcvbadoff);
1312 ASSIGN(tcps_rcvmemdrop);
1313 ASSIGN(tcps_rcvnosec);
1314 ASSIGN(tcps_rcvshort);
1315 ASSIGN(tcps_rcvduppack);
1316 ASSIGN(tcps_rcvdupbyte);
1317 ASSIGN(tcps_rcvpartduppack);
1318 ASSIGN(tcps_rcvpartdupbyte);
1319 ASSIGN(tcps_rcvoopack);
1320 ASSIGN(tcps_rcvoobyte);
1321 ASSIGN(tcps_rcvpackafterwin);
1322 ASSIGN(tcps_rcvbyteafterwin);
1323 ASSIGN(tcps_rcvafterclose);
1324 ASSIGN(tcps_rcvwinprobe);
1325 ASSIGN(tcps_rcvdupack);
1326 ASSIGN(tcps_rcvacktoomuch);
1327 ASSIGN(tcps_rcvacktooold);
1328 ASSIGN(tcps_rcvackpack);
1329 ASSIGN(tcps_rcvackbyte);
1330 ASSIGN(tcps_rcvwinupd);
1331 ASSIGN(tcps_pawsdrop);
1332 ASSIGN(tcps_predack);
1333 ASSIGN(tcps_preddat);
1334 ASSIGN(tcps_pcbhashmiss);
1335 ASSIGN(tcps_noport);
1336 ASSIGN(tcps_badsyn);
1337 ASSIGN(tcps_dropsyn);
1338 ASSIGN(tcps_rcvbadsig);
1339 ASSIGN(tcps_rcvgoodsig);
1340 ASSIGN(tcps_inswcsum);
1341 ASSIGN(tcps_outswcsum);
1342 ASSIGN(tcps_ecn_accepts);
1343 ASSIGN(tcps_ecn_rcvece);
1344 ASSIGN(tcps_ecn_rcvcwr);
1345 ASSIGN(tcps_ecn_rcvce);
1346 ASSIGN(tcps_ecn_sndect);
1347 ASSIGN(tcps_ecn_sndece);
1348 ASSIGN(tcps_ecn_sndcwr);
1349 ASSIGN(tcps_cwr_ecn);
1350 ASSIGN(tcps_cwr_frecovery);
1351 ASSIGN(tcps_cwr_timeout);
1352 ASSIGN(tcps_sc_added);
1353 ASSIGN(tcps_sc_completed);
1354 ASSIGN(tcps_sc_timed_out);
1355 ASSIGN(tcps_sc_overflowed);
1356 ASSIGN(tcps_sc_reset);
1357 ASSIGN(tcps_sc_unreach);
1358 ASSIGN(tcps_sc_bucketoverflow);
1359 ASSIGN(tcps_sc_aborted);
1360 ASSIGN(tcps_sc_dupesyn);
1361 ASSIGN(tcps_sc_dropped);
1362 ASSIGN(tcps_sc_collisions);
1363 ASSIGN(tcps_sc_retransmitted);
1364 ASSIGN(tcps_sc_seedrandom);
1365 ASSIGN(tcps_sc_hash_size);
1366 ASSIGN(tcps_sc_entry_count);
1367 ASSIGN(tcps_sc_entry_limit);
1368 ASSIGN(tcps_sc_bucket_maxlen);
1369 ASSIGN(tcps_sc_bucket_limit);
1370 ASSIGN(tcps_sc_uses_left);
1371 ASSIGN(tcps_conndrained);
1372 ASSIGN(tcps_sack_recovery_episode);
1373 ASSIGN(tcps_sack_rexmits);
1374 ASSIGN(tcps_sack_rexmit_bytes);
1375 ASSIGN(tcps_sack_rcv_opts);
1376 ASSIGN(tcps_sack_snd_opts);
1377 ASSIGN(tcps_sack_drop_opts);
1378 ASSIGN(tcps_outswtso);
1379 ASSIGN(tcps_outhwtso);
1380 ASSIGN(tcps_outpkttso);
1381 ASSIGN(tcps_outbadtso);
1382 ASSIGN(tcps_inswlro);
1383 ASSIGN(tcps_inhwlro);
1384 ASSIGN(tcps_inpktlro);
1385 ASSIGN(tcps_inbadlro);
1386
1387 #undef ASSIGN
1388
1389 mtx_enter(&syn_cache_mtx);
1390 set = &tcp_syn_cache[tcp_syn_cache_active];
1391 tcpstat.tcps_sc_hash_size = set->scs_size;
1392 tcpstat.tcps_sc_entry_count = set->scs_count;
1393 tcpstat.tcps_sc_entry_limit = atomic_load_int(&tcp_syn_cache_limit);
1394 tcpstat.tcps_sc_bucket_maxlen = 0;
1395 for (i = 0; i < set->scs_size; i++) {
1396 if (tcpstat.tcps_sc_bucket_maxlen <
1397 set->scs_buckethead[i].sch_length)
1398 tcpstat.tcps_sc_bucket_maxlen =
1399 set->scs_buckethead[i].sch_length;
1400 }
1401 tcpstat.tcps_sc_bucket_limit = atomic_load_int(&tcp_syn_bucket_limit);
1402 tcpstat.tcps_sc_uses_left = set->scs_use;
1403 mtx_leave(&syn_cache_mtx);
1404
1405 return (sysctl_rdstruct(oldp, oldlenp, newp,
1406 &tcpstat, sizeof(tcpstat)));
1407 }
1408
1409 /*
1410 * Sysctl for tcp variables.
1411 */
1412 int
1413 tcp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1414 size_t newlen)
1415 {
1416 int error, oval, nval;
1417
1418 /* All sysctl names at this level are terminal. */
1419 if (namelen != 1)
1420 return (ENOTDIR);
1421
1422 switch (name[0]) {
1423 case TCPCTL_BADDYNAMIC:
1424 NET_LOCK();
1425 error = sysctl_struct(oldp, oldlenp, newp, newlen,
1426 baddynamicports.tcp, sizeof(baddynamicports.tcp));
1427 NET_UNLOCK();
1428 return (error);
1429
1430 case TCPCTL_ROOTONLY:
1431 if (newp && securelevel > 0)
1432 return (EPERM);
1433 NET_LOCK();
1434 error = sysctl_struct(oldp, oldlenp, newp, newlen,
1435 rootonlyports.tcp, sizeof(rootonlyports.tcp));
1436 NET_UNLOCK();
1437 return (error);
1438
1439 case TCPCTL_IDENT:
1440 return tcp_ident(oldp, oldlenp, newp, newlen, 0);
1441
1442 case TCPCTL_DROP:
1443 return tcp_ident(oldp, oldlenp, newp, newlen, 1);
1444
1445 case TCPCTL_REASS_LIMIT:
1446 NET_LOCK();
1447 nval = tcp_reass_limit;
1448 error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1449 if (!error && nval != tcp_reass_limit) {
1450 error = pool_sethardlimit(&tcpqe_pool, nval, NULL, 0);
1451 if (!error)
1452 tcp_reass_limit = nval;
1453 }
1454 NET_UNLOCK();
1455 return (error);
1456
1457 case TCPCTL_SACKHOLE_LIMIT:
1458 NET_LOCK();
1459 nval = tcp_sackhole_limit;
1460 error = sysctl_int(oldp, oldlenp, newp, newlen, &nval);
1461 if (!error && nval != tcp_sackhole_limit) {
1462 error = pool_sethardlimit(&sackhl_pool, nval, NULL, 0);
1463 if (!error)
1464 tcp_sackhole_limit = nval;
1465 }
1466 NET_UNLOCK();
1467 return (error);
1468
1469 case TCPCTL_STATS:
1470 return (tcp_sysctl_tcpstat(oldp, oldlenp, newp));
1471
1472 case TCPCTL_SYN_USE_LIMIT:
1473 oval = nval = atomic_load_int(&tcp_syn_use_limit);
1474 error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
1475 &nval, 0, INT_MAX);
1476 if (!error && oval != nval) {
1477 /*
1478 * Global tcp_syn_use_limit is used when reseeding a
1479 * new cache. Also update the value in active cache.
1480 */
1481 mtx_enter(&syn_cache_mtx);
1482 if (tcp_syn_cache[0].scs_use > nval)
1483 tcp_syn_cache[0].scs_use = nval;
1484 if (tcp_syn_cache[1].scs_use > nval)
1485 tcp_syn_cache[1].scs_use = nval;
1486 tcp_syn_use_limit = nval;
1487 mtx_leave(&syn_cache_mtx);
1488 }
1489 return (error);
1490
1491 case TCPCTL_SYN_HASH_SIZE:
1492 oval = nval = atomic_load_int(&tcp_syn_hash_size);
1493 error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
1494 &nval, 1, 100000);
1495 if (!error && oval != nval) {
1496 /*
1497 * If global hash size has been changed,
1498 * switch sets as soon as possible. Then
1499 * the actual hash array will be reallocated.
1500 */
1501 mtx_enter(&syn_cache_mtx);
1502 if (tcp_syn_cache[0].scs_size != nval)
1503 tcp_syn_cache[0].scs_use = 0;
1504 if (tcp_syn_cache[1].scs_size != nval)
1505 tcp_syn_cache[1].scs_use = 0;
1506 tcp_syn_hash_size = nval;
1507 mtx_leave(&syn_cache_mtx);
1508 }
1509 return (error);
1510
1511 default:
1512 error = sysctl_bounded_arr(tcpctl_vars, nitems(tcpctl_vars),
1513 name, namelen, oldp, oldlenp, newp, newlen);
1514 switch (name[0]) {
1515 case TCPCTL_KEEPINITTIME:
1516 atomic_store_int(&tcp_keepinit,
1517 atomic_load_int(&tcp_keepinit_sec) * TCP_TIME(1));
1518 break;
1519 case TCPCTL_KEEPIDLE:
1520 atomic_store_int(&tcp_keepidle,
1521 atomic_load_int(&tcp_keepidle_sec) * TCP_TIME(1));
1522 break;
1523 case TCPCTL_KEEPINTVL:
1524 atomic_store_int(&tcp_keepintvl,
1525 atomic_load_int(&tcp_keepintvl_sec) * TCP_TIME(1));
1526 break;
1527 }
1528 return (error);
1529 }
1530 /* NOTREACHED */
1531 }
1532
1533 /*
1534 * Scale the send buffer so that inflight data is not accounted against
1535  * the limit. The buffer will scale with the congestion window; if the
1536  * receiver stops acking data, the window will shrink and therefore
1537  * the buffer size will shrink as well.
1538  * In a low-memory situation try to shrink the buffer to the initial size,
1539  * disabling the send buffer scaling as long as the situation persists.
1540 */
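/*
 * For example (illustrative numbers only): with sb_wat at the default
 * tcp_sendspace of 16k and 8k of unacknowledged data in flight
 * (snd_max - snd_una), the buffer may grow to 24k, capped at sb_max
 * and rounded up to a multiple of t_maxseg.
 */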
1541 void
1542 tcp_update_sndspace(struct tcpcb *tp)
1543 {
1544 struct socket *so = tp->t_inpcb->inp_socket;
1545 u_long nmax;
1546
1547 mtx_enter(&so->so_snd.sb_mtx);
1548
1549 nmax = so->so_snd.sb_hiwat;
1550
1551 if (sbchecklowmem()) {
1552 /* low on memory try to get rid of some */
1553 if (tcp_sendspace < nmax)
1554 nmax = tcp_sendspace;
1555 } else if (so->so_snd.sb_wat != tcp_sendspace) {
1556 /* user requested buffer size, auto-scaling disabled */
1557 nmax = so->so_snd.sb_wat;
1558 } else {
1559 /* automatic buffer scaling */
1560 nmax = MIN(sb_max, so->so_snd.sb_wat + tp->snd_max -
1561 tp->snd_una);
1562 }
1563
1564 /* a writable socket must be preserved because of poll(2) semantics */
1565 if (sbspace_locked(so, &so->so_snd) >= so->so_snd.sb_lowat) {
1566 if (nmax < so->so_snd.sb_cc + so->so_snd.sb_lowat)
1567 nmax = so->so_snd.sb_cc + so->so_snd.sb_lowat;
1568 /* keep in sync with sbreserve() calculation */
1569 if (nmax * 8 < so->so_snd.sb_mbcnt + so->so_snd.sb_lowat)
1570 nmax = (so->so_snd.sb_mbcnt+so->so_snd.sb_lowat+7) / 8;
1571 }
1572
1573 /* round to MSS boundary */
1574 nmax = roundup(nmax, tp->t_maxseg);
1575
1576 if (nmax != so->so_snd.sb_hiwat)
1577 sbreserve(so, &so->so_snd, nmax);
1578
1579 mtx_leave(&so->so_snd.sb_mtx);
1580 }
1581
1582 /*
1583 * Scale the recv buffer by looking at how much data was transferred in
1584  * one approximated RTT. If more than 7/8 of the recv buffer was
1585  * transferred during that time, increase the buffer by a constant.
1586  * In a low-memory situation try to shrink the buffer to the initial size.
1587 */
1588 void
1589 tcp_update_rcvspace(struct tcpcb *tp)
1590 {
1591 struct socket *so = tp->t_inpcb->inp_socket;
1592 u_long nmax;
1593
1594 mtx_enter(&so->so_rcv.sb_mtx);
1595
1596 nmax = so->so_rcv.sb_hiwat;
1597
1598 if (sbchecklowmem()) {
1599 /* low on memory try to get rid of some */
1600 if (tcp_recvspace < nmax)
1601 nmax = tcp_recvspace;
1602 } else if (so->so_rcv.sb_wat != tcp_recvspace) {
1603 /* user requested buffer size, auto-scaling disabled */
1604 nmax = so->so_rcv.sb_wat;
1605 } else {
1606 /* automatic buffer scaling */
1607 if (tp->rfbuf_cnt > so->so_rcv.sb_hiwat / 8 * 7)
1608 nmax = MIN(sb_max, so->so_rcv.sb_hiwat +
1609 tcp_autorcvbuf_inc);
1610 }
1611
1612 /* a readable socket must be preserved because of poll(2) semantics */
1613 mtx_enter(&so->so_snd.sb_mtx);
1614 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat &&
1615 nmax < so->so_snd.sb_lowat)
1616 nmax = so->so_snd.sb_lowat;
1617 mtx_leave(&so->so_snd.sb_mtx);
1618
1619 if (nmax != so->so_rcv.sb_hiwat) {
1620 /* round to MSS boundary */
1621 nmax = roundup(nmax, tp->t_maxseg);
1622 sbreserve(so, &so->so_rcv, nmax);
1623 }
1624
1625 mtx_leave(&so->so_rcv.sb_mtx);
1626 }
1627