/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/stats.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_rss.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/tcp_ecn.h>
#include <netinet/udp.h>

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

VNET_DEFINE(int, tcp_log_in_vain) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_log_in_vain), 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(bool, blackhole_local) = false;
#define	V_blackhole_local	VNET(blackhole_local)
SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, blackhole_local, CTLFLAG_VNET |
    CTLFLAG_RW, &VNET_NAME(blackhole_local), false,
    "Enforce net.inet.tcp.blackhole for locally originated packets");
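
/*
 * Illustrative note (not part of the original source): these knobs are
 * per-VNET and can be changed at run time, e.g.
 *
 *	sysctl net.inet.tcp.blackhole=2
 *
 * silently drops every segment arriving at a closed port instead of
 * answering with a RST, while a value of 1 only drops SYN segments;
 * see the blackholing check in tcp_input_with_port() below.
 */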

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_prr) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_prr), 1,
    "Enable Proportional Rate Reduction per RFC 6937");

VNET_DEFINE(int, tcp_do_newcwv) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_newcwv), 0,
    "Enable New Congestion Window Validation per RFC7661");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The running TCP connection count is a regular
 * array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

/*
 * Kernel module interface for updating tcpstat.  The first argument is an
 * index into tcpstat treated as an array.
 */
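/*
 * Illustrative note: kernel modules normally do not call this function
 * directly; they typically go through the KMOD_TCPSTAT_INC()/
 * KMOD_TCPSTAT_ADD() wrappers (see tcp_var.h), which translate a
 * struct tcpstat member name into the array index expected here.
 */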
void
kmod_tcpstat_add(int statnum, int val)
{

    counter_u64_add(VNET(tcpstat)[statnum], val);
}

/*
 * Make sure that we only start a SACK loss recovery when
 * receiving a duplicate ACK with a SACK block, and also
 * complete SACK loss recovery in case the other end
 * reneges.
 */
static bool inline
tcp_is_sack_recovery(struct tcpcb *tp, struct tcpopt *to)
{
    return ((tp->t_flags & TF_SACK_PERMIT) &&
        ((to->to_flags & TOF_SACK) ||
        (!TAILQ_EMPTY(&tp->snd_holes))));
}

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
    struct tcp_hhook_data hhook_data;

    if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
        hhook_data.tp = tp;
        hhook_data.th = th;
        hhook_data.to = to;

        hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
            &tp->t_osd);
    }
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
#ifdef STATS
    int32_t gput;
#endif

    INP_WLOCK_ASSERT(tptoinpcb(tp));

    tp->t_ccv.nsegs = nsegs;
    tp->t_ccv.bytes_this_ack = BYTES_THIS_ACK(tp, th);
    if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
        (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
        (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
        tp->t_ccv.flags |= CCF_CWND_LIMITED;
    else
        tp->t_ccv.flags &= ~CCF_CWND_LIMITED;

    if (type == CC_ACK) {
#ifdef STATS
        stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
            ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
        if (!IN_RECOVERY(tp->t_flags))
            stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
                tp->t_ccv.bytes_this_ack / (tcp_maxseg(tp) * nsegs));
        if ((tp->t_flags & TF_GPUTINPROG) &&
            SEQ_GEQ(th->th_ack, tp->gput_ack)) {
            /*
             * Compute goodput in bits per millisecond.
             */
            gput = (((int64_t)SEQ_SUB(th->th_ack, tp->gput_seq)) << 3) /
                max(1, tcp_ts_getticks() - tp->gput_ts);
            stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
                gput);
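            /*
             * Worked example (illustrative): 1,250,000 bytes
             * acknowledged over 100 ticks (milliseconds) gives
             * (1250000 << 3) / 100 = 100000 bits/ms, i.e. roughly
             * 100 Mbit/s of goodput.
             */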
            /*
             * XXXLAS: This is a temporary hack, and should be
             * chained off VOI_TCP_GPUT when stats(9) grows an API
             * to deal with chained VOIs.
             */
            if (tp->t_stats_gput_prev > 0)
                stats_voi_update_abs_s32(tp->t_stats,
                    VOI_TCP_GPUT_ND,
                    ((gput - tp->t_stats_gput_prev) * 100) /
                    tp->t_stats_gput_prev);
            tp->t_flags &= ~TF_GPUTINPROG;
            tp->t_stats_gput_prev = gput;
        }
#endif /* STATS */
        if (tp->snd_cwnd > tp->snd_ssthresh) {
            tp->t_bytes_acked += tp->t_ccv.bytes_this_ack;
            if (tp->t_bytes_acked >= tp->snd_cwnd) {
                tp->t_bytes_acked -= tp->snd_cwnd;
                tp->t_ccv.flags |= CCF_ABC_SENTAWND;
            }
        } else {
            tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
            tp->t_bytes_acked = 0;
        }
    }

    if (CC_ALGO(tp)->ack_received != NULL) {
        /* XXXLAS: Find a way to live without this */
        tp->t_ccv.curack = th->th_ack;
        CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
    }
#ifdef STATS
    stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
}

void
cc_conn_init(struct tcpcb *tp)
{
    struct hc_metrics_lite metrics;
    struct inpcb *inp = tptoinpcb(tp);
    u_int maxseg;
    int rtt;

    INP_WLOCK_ASSERT(inp);

    tcp_hc_get(&inp->inp_inc, &metrics);
    maxseg = tcp_maxseg(tp);

    if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
        tp->t_srtt = rtt;
        TCPSTAT_INC(tcps_usedrtt);
        if (metrics.rmx_rttvar) {
            tp->t_rttvar = metrics.rmx_rttvar;
            TCPSTAT_INC(tcps_usedrttvar);
        } else {
            /* default variation is +- 1 rtt */
            tp->t_rttvar =
                tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
        }
        TCPT_RANGESET(tp->t_rxtcur,
            ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
            tp->t_rttmin, TCPTV_REXMTMAX);
    }
    if (metrics.rmx_ssthresh) {
        /*
         * There's some sort of gateway or interface
         * buffer limit on the path.  Use this to set
         * the slow start threshold, but set the
         * threshold to no less than 2*mss.
         */
        tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
        TCPSTAT_INC(tcps_usedssthresh);
    }

    /*
     * Set the initial slow-start flight size.
     *
     * If a SYN or SYN/ACK was lost and retransmitted, we have to
     * reduce the initial CWND to one segment as congestion is likely
     * requiring us to be cautious.
     */
    if (tp->snd_cwnd == 1)
        tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
    else
        tp->snd_cwnd = tcp_compute_initwnd(maxseg);

    if (CC_ALGO(tp)->conn_init != NULL)
        CC_ALGO(tp)->conn_init(&tp->t_ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
    INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
    stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif

    switch(type) {
    case CC_NDUPACK:
        if (!IN_FASTRECOVERY(tp->t_flags)) {
            tp->snd_recover = tp->snd_max;
            if (tp->t_flags2 & TF2_ECN_PERMIT)
                tp->t_flags2 |= TF2_ECN_SND_CWR;
        }
        break;
    case CC_ECN:
        if (!IN_CONGRECOVERY(tp->t_flags) ||
            /*
             * Allow ECN reaction on ACK to CWR, if
             * that data segment was also CE marked.
             */
            SEQ_GEQ(th->th_ack, tp->snd_recover)) {
            EXIT_CONGRECOVERY(tp->t_flags);
            TCPSTAT_INC(tcps_ecn_rcwnd);
            tp->snd_recover = tp->snd_max + 1;
            if (tp->t_flags2 & TF2_ECN_PERMIT)
                tp->t_flags2 |= TF2_ECN_SND_CWR;
        }
        break;
    case CC_RTO:
        tp->t_dupacks = 0;
        tp->t_bytes_acked = 0;
        EXIT_RECOVERY(tp->t_flags);
        if (tp->t_flags2 & TF2_ECN_PERMIT)
            tp->t_flags2 |= TF2_ECN_SND_CWR;
        break;
    case CC_RTO_ERR:
        TCPSTAT_INC(tcps_sndrexmitbad);
        /* RTO was unnecessary, so reset everything. */
        tp->snd_cwnd = tp->snd_cwnd_prev;
        tp->snd_ssthresh = tp->snd_ssthresh_prev;
        tp->snd_recover = tp->snd_recover_prev;
        if (tp->t_flags & TF_WASFRECOVERY)
            ENTER_FASTRECOVERY(tp->t_flags);
        if (tp->t_flags & TF_WASCRECOVERY)
            ENTER_CONGRECOVERY(tp->t_flags);
        tp->snd_nxt = tp->snd_max;
        tp->t_flags &= ~TF_PREVVALID;
        tp->t_badrxtwin = 0;
        break;
    }

    if (CC_ALGO(tp)->cong_signal != NULL) {
        if (th != NULL)
            tp->t_ccv.curack = th->th_ack;
        CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
    }
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
    INP_WLOCK_ASSERT(tptoinpcb(tp));

    /* XXXLAS: KASSERT that we're in recovery? */

    if (CC_ALGO(tp)->post_recovery != NULL) {
        tp->t_ccv.curack = th->th_ack;
        CC_ALGO(tp)->post_recovery(&tp->t_ccv);
    }
    /* XXXLAS: EXIT_RECOVERY ? */
    tp->t_bytes_acked = 0;
    tp->sackhint.delivered_data = 0;
    tp->sackhint.prr_out = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
    ((!tcp_timer_active(tp, TT_DELACK) &&				\
        (tp->t_flags & TF_RXWIN0SENT) == 0) &&				\
        (tlen <= tp->t_maxseg) &&					\
        (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

void inline
cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
{
    INP_WLOCK_ASSERT(tptoinpcb(tp));

    if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
        switch (iptos & IPTOS_ECN_MASK) {
        case IPTOS_ECN_CE:
            tp->t_ccv.flags |= CCF_IPHDR_CE;
            break;
        case IPTOS_ECN_ECT0:
            /* FALLTHROUGH */
        case IPTOS_ECN_ECT1:
            /* FALLTHROUGH */
        case IPTOS_ECN_NOTECT:
            tp->t_ccv.flags &= ~CCF_IPHDR_CE;
            break;
        }

        if (flags & TH_CWR)
            tp->t_ccv.flags |= CCF_TCPHDR_CWR;
        else
            tp->t_ccv.flags &= ~CCF_TCPHDR_CWR;

        CC_ALGO(tp)->ecnpkt_handler(&tp->t_ccv);

        if (tp->t_ccv.flags & CCF_ACKNOW) {
            tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
            tp->t_flags |= TF_ACKNOW;
        }
    }
}

void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
    cc_ecnpkt_handler_flags(tp, tcp_get_flags(th), iptos);
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
    struct mbuf *m;
    struct in6_ifaddr *ia6;
    struct ip6_hdr *ip6;

    m = *mp;
    if (m->m_len < *offp + sizeof(struct tcphdr)) {
        m = m_pullup(m, *offp + sizeof(struct tcphdr));
        if (m == NULL) {
            *mp = m;
            TCPSTAT_INC(tcps_rcvshort);
            return (IPPROTO_DONE);
        }
    }

    /*
     * draft-itojun-ipv6-tcp-to-anycast
     * better place to put this in?
     */
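    /*
     * Illustrative note: the block below rejects segments whose
     * destination is an IPv6 anycast address; such connection attempts
     * are refused with an ICMPv6 destination-unreachable (address)
     * error rather than being silently dropped.
     */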
    ip6 = mtod(m, struct ip6_hdr *);
    ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
    if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
        icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
            (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
        *mp = NULL;
        return (IPPROTO_DONE);
    }

    *mp = m;
    return (tcp_input_with_port(mp, offp, proto, port));
}

int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{

    return(tcp6_input_with_port(mp, offp, proto, 0));
}
#endif /* INET6 */

int
tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
    struct mbuf *m = *mp;
    struct tcphdr *th = NULL;
    struct ip *ip = NULL;
    struct inpcb *inp = NULL;
    struct tcpcb *tp = NULL;
    struct socket *so = NULL;
    u_char *optp = NULL;
    int off0;
    int optlen = 0;
#ifdef INET
    int len;
    uint8_t ipttl;
#endif
    int tlen = 0, off;
    int drop_hdrlen;
    int thflags;
    int rstreason = 0;	/* For badport_bandlim accounting purposes */
    int lookupflag;
    uint8_t iptos;
    struct m_tag *fwd_tag = NULL;
#ifdef INET6
    struct ip6_hdr *ip6 = NULL;
    int isipv6;
#else
    const void *ip6 = NULL;
#endif /* INET6 */
    struct tcpopt to;		/* options in this segment */
    char *s = NULL;		/* address and port logging */

    NET_EPOCH_ASSERT();

#ifdef INET6
    isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

    off0 = *offp;
    m = *mp;
    *mp = NULL;
    to.to_flags = 0;
    TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
    if (isipv6) {
        ip6 = mtod(m, struct ip6_hdr *);
        th = (struct tcphdr *)((caddr_t)ip6 + off0);
        tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
        if (port)
            goto skip6_csum;
        if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
            if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
                th->th_sum = m->m_pkthdr.csum_data;
            else
                th->th_sum = in6_cksum_pseudo(ip6, tlen,
                    IPPROTO_TCP, m->m_pkthdr.csum_data);
            th->th_sum ^= 0xffff;
        } else
            th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
        if (th->th_sum) {
            TCPSTAT_INC(tcps_rcvbadsum);
            goto drop;
        }
skip6_csum:
        /*
         * Be proactive about unspecified IPv6 address in source.
         * As we use all-zero to indicate unbounded/unconnected pcb,
         * unspecified IPv6 address can be used to confuse us.
         *
         * Note that packets with an unspecified IPv6 destination are
         * already dropped in ip6_input.
         */
        KASSERT(!IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst),
            ("%s: unspecified destination v6 address", __func__));
        if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
            IP6STAT_INC(ip6s_badscope); /* XXX */
            goto drop;
        }
        iptos = IPV6_TRAFFIC_CLASS(ip6);
    }
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        /*
         * Get IP and TCP header together in first mbuf.
         * Note: IP leaves IP header in first mbuf.
         */
        if (off0 > sizeof (struct ip)) {
            ip_stripoptions(m);
            off0 = sizeof(struct ip);
        }
        if (m->m_len < sizeof (struct tcpiphdr)) {
            if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
                == NULL) {
                TCPSTAT_INC(tcps_rcvshort);
                return (IPPROTO_DONE);
            }
        }
        ip = mtod(m, struct ip *);
        th = (struct tcphdr *)((caddr_t)ip + off0);
        tlen = ntohs(ip->ip_len) - off0;

        iptos = ip->ip_tos;
        if (port)
            goto skip_csum;
        if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
            if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
                th->th_sum = m->m_pkthdr.csum_data;
            else
                th->th_sum = in_pseudo(ip->ip_src.s_addr,
                    ip->ip_dst.s_addr,
                    htonl(m->m_pkthdr.csum_data + tlen +
                    IPPROTO_TCP));
            th->th_sum ^= 0xffff;
        } else {
            struct ipovly *ipov = (struct ipovly *)ip;

            /*
             * Checksum extended TCP header and data.
             */
            len = off0 + tlen;
            ipttl = ip->ip_ttl;
            bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
            ipov->ih_len = htons(tlen);
            th->th_sum = in_cksum(m, len);
            /* Reset length for SDT probes. */
            ip->ip_len = htons(len);
            /* Reset TOS bits */
            ip->ip_tos = iptos;
            /* Re-initialization for later version check */
            ip->ip_ttl = ipttl;
            ip->ip_v = IPVERSION;
            ip->ip_hl = off0 >> 2;
        }
skip_csum:
        if (th->th_sum && (port == 0)) {
            TCPSTAT_INC(tcps_rcvbadsum);
            goto drop;
        }
        KASSERT(ip->ip_dst.s_addr != INADDR_ANY,
            ("%s: unspecified destination v4 address", __func__));
        if (__predict_false(ip->ip_src.s_addr == INADDR_ANY)) {
            IPSTAT_INC(ips_badaddr);
            goto drop;
        }
    }
#endif /* INET */

    /*
     * Check that TCP offset makes sense,
     * pull out TCP options and adjust length.		XXX
     */
    off = th->th_off << 2;
    if (off < sizeof (struct tcphdr) || off > tlen) {
        TCPSTAT_INC(tcps_rcvbadoff);
        goto drop;
    }
    tlen -= off;	/* tlen is used instead of ti->ti_len */
    if (off > sizeof (struct tcphdr)) {
#ifdef INET6
        if (isipv6) {
            if (m->m_len < off0 + off) {
                m = m_pullup(m, off0 + off);
                if (m == NULL) {
                    TCPSTAT_INC(tcps_rcvshort);
                    return (IPPROTO_DONE);
                }
            }
            ip6 = mtod(m, struct ip6_hdr *);
            th = (struct tcphdr *)((caddr_t)ip6 + off0);
        }
#endif
#if defined(INET) && defined(INET6)
        else
#endif
#ifdef INET
        {
            if (m->m_len < sizeof(struct ip) + off) {
                if ((m = m_pullup(m, sizeof (struct ip) + off))
                    == NULL) {
                    TCPSTAT_INC(tcps_rcvshort);
                    return (IPPROTO_DONE);
                }
                ip = mtod(m, struct ip *);
                th = (struct tcphdr *)((caddr_t)ip + off0);
            }
        }
#endif
        optlen = off - sizeof (struct tcphdr);
        optp = (u_char *)(th + 1);
    }
    thflags = tcp_get_flags(th);

    /*
     * Convert TCP protocol specific fields to host format.
     */
    tcp_fields_to_host(th);

    /*
     * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
     */
    drop_hdrlen = off0 + off;

    /*
     * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
     */
    if (
#ifdef INET6
        (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
        || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
        (m->m_flags & M_IP_NEXTHOP)
#endif
        )
        fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

    /*
     * For initial SYN packets we don't need write lock on matching
     * PCB, be it a listening one or a synchronized one.  The packet
     * shall not modify its state.
     */
    lookupflag = INPLOOKUP_WILDCARD |
        ((thflags & (TH_ACK|TH_SYN)) == TH_SYN ?
        INPLOOKUP_RLOCKPCB : INPLOOKUP_WLOCKPCB);
findpcb:
#ifdef INET6
    if (isipv6 && fwd_tag != NULL) {
        struct sockaddr_in6 *next_hop6;

        next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
        /*
         * Transparently forwarded. Pretend to be the destination.
         * Already got one like this?
         */
        inp = in6_pcblookup_mbuf(&V_tcbinfo,
            &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
            lookupflag & ~INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif, m);
        if (!inp) {
            /*
             * It's new.  Try to find the ambushing socket.
             * Because we've rewritten the destination address,
             * any hardware-generated hash is ignored.
             */
            inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
                th->th_sport, &next_hop6->sin6_addr,
                next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
                th->th_dport, lookupflag, m->m_pkthdr.rcvif);
        }
    } else if (isipv6) {
        inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
            th->th_sport, &ip6->ip6_dst, th->th_dport, lookupflag,
            m->m_pkthdr.rcvif, m);
    }
#endif /* INET6 */
#if defined(INET6) && defined(INET)
    else
#endif
#ifdef INET
    if (fwd_tag != NULL) {
        struct sockaddr_in *next_hop;

        next_hop = (struct sockaddr_in *)(fwd_tag+1);
        /*
         * Transparently forwarded. Pretend to be the destination.
         * already got one like this?
         */
        inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
            ip->ip_dst, th->th_dport, lookupflag & ~INPLOOKUP_WILDCARD,
            m->m_pkthdr.rcvif, m);
        if (!inp) {
            /*
             * It's new.  Try to find the ambushing socket.
             * Because we've rewritten the destination address,
             * any hardware-generated hash is ignored.
             */
            inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
                th->th_sport, next_hop->sin_addr,
                next_hop->sin_port ? ntohs(next_hop->sin_port) :
                th->th_dport, lookupflag, m->m_pkthdr.rcvif);
        }
    } else
        inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
            th->th_sport, ip->ip_dst, th->th_dport, lookupflag,
            m->m_pkthdr.rcvif, m);
#endif /* INET */

    /*
     * If the INPCB does not exist then all data in the incoming
     * segment is discarded and an appropriate RST is sent back.
     * XXX MRT Send RST using which routing table?
     */
    if (inp == NULL) {
        if (rstreason != 0) {
            /* We came here after second (safety) lookup. */
            MPASS((lookupflag & INPLOOKUP_WILDCARD) == 0);
            goto dropwithreset;
        }
        /*
         * Log communication attempts to ports that are not
         * in use.
         */
        if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
            V_tcp_log_in_vain == 2) {
            if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
                log(LOG_INFO, "%s; %s: Connection attempt "
                    "to closed port\n", s, __func__);
        }
        /*
         * When blackholing do not respond with a RST but
         * completely ignore the segment and drop it.
         */
        if (((V_blackhole == 1 && (thflags & TH_SYN)) ||
            V_blackhole == 2) && (V_blackhole_local || (
#ifdef INET6
            isipv6 ? !in6_localaddr(&ip6->ip6_src) :
#endif
#ifdef INET
            !in_localip(ip->ip_src)
#else
            true
#endif
            )))
            goto dropunlock;

        rstreason = BANDLIM_RST_CLOSEDPORT;
        goto dropwithreset;
    }
    INP_LOCK_ASSERT(inp);

    if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
        !SOLISTENING(inp->inp_socket)) {
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
            inp->inp_flowid = m->m_pkthdr.flowid;
            inp->inp_flowtype = M_HASHTYPE_GET(m);
#ifdef RSS
        } else {
            /* assign flowid by software RSS hash */
#ifdef INET6
            if (isipv6) {
                rss_proto_software_hash_v6(&inp->in6p_faddr,
                    &inp->in6p_laddr,
                    inp->inp_fport,
                    inp->inp_lport,
                    IPPROTO_TCP,
                    &inp->inp_flowid,
                    &inp->inp_flowtype);
            } else
#endif /* INET6 */
            {
                rss_proto_software_hash_v4(inp->inp_faddr,
                    inp->inp_laddr,
                    inp->inp_fport,
                    inp->inp_lport,
                    IPPROTO_TCP,
                    &inp->inp_flowid,
                    &inp->inp_flowtype);
            }
#endif /* RSS */
        }
    }
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
    if (isipv6 && IPSEC_ENABLED(ipv6) &&
        IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
        goto dropunlock;
    }
#ifdef INET
    else
#endif
#endif /* INET6 */
#ifdef INET
    if (IPSEC_ENABLED(ipv4) &&
        IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
        goto dropunlock;
    }
#endif /* INET */
#endif /* IPSEC */

    /*
     * Check the minimum TTL for socket.
     */
    if (inp->inp_ip_minttl != 0) {
#ifdef INET6
        if (isipv6) {
            if (inp->inp_ip_minttl > ip6->ip6_hlim)
                goto dropunlock;
        } else
#endif
        if (inp->inp_ip_minttl > ip->ip_ttl)
            goto dropunlock;
    }

    tp = intotcpcb(inp);
    switch (tp->t_state) {
    case TCPS_TIME_WAIT:
        /*
         * A previous connection in TIMEWAIT state is supposed to catch
         * stray or duplicate segments arriving late.  If this segment
         * was a legitimate new connection attempt, the old INPCB gets
         * removed and we can try again to find a listening socket.
         */
        tcp_dooptions(&to, optp, optlen,
            (thflags & TH_SYN) ? TO_SYN : 0);
        /*
         * tcp_twcheck always unlocks the inp, and frees the mbuf if it
         * fails.
         */
        if (tcp_twcheck(inp, &to, th, m, tlen))
            goto findpcb;
        return (IPPROTO_DONE);
    case TCPS_CLOSED:
        /*
         * The TCPCB may no longer exist if the connection is winding
         * down or it is in the CLOSED state.  Either way we drop the
         * segment and send an appropriate response.
         */
        rstreason = BANDLIM_RST_CLOSEDPORT;
        goto dropwithreset;
    }

    if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
        rstreason = BANDLIM_RST_CLOSEDPORT;
        goto dropwithreset;
    }

#ifdef TCP_OFFLOAD
    if (tp->t_flags & TF_TOE) {
        tcp_offload_input(tp, m);
        m = NULL;	/* consumed by the TOE driver */
        goto dropunlock;
    }
#endif

#ifdef MAC
    if (mac_inpcb_check_deliver(inp, m))
        goto dropunlock;
#endif
    so = inp->inp_socket;
    KASSERT(so != NULL, ("%s: so == NULL", __func__));
    /*
     * When the socket is accepting connections (the INPCB is in LISTEN
     * state) we look into the SYN cache if this is a new connection
     * attempt or the completion of a previous one.
     */
    KASSERT(tp->t_state == TCPS_LISTEN || !SOLISTENING(so),
        ("%s: so accepting but tp %p not listening", __func__, tp));
    if (tp->t_state == TCPS_LISTEN && SOLISTENING(so)) {
        struct in_conninfo inc;

        bzero(&inc, sizeof(inc));
#ifdef INET6
        if (isipv6) {
            inc.inc_flags |= INC_ISIPV6;
            if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
                inc.inc_flags |= INC_IPV6MINMTU;
            inc.inc6_faddr = ip6->ip6_src;
            inc.inc6_laddr = ip6->ip6_dst;
        } else
#endif
        {
            inc.inc_faddr = ip->ip_src;
            inc.inc_laddr = ip->ip_dst;
        }
        inc.inc_fport = th->th_sport;
        inc.inc_lport = th->th_dport;
        inc.inc_fibnum = so->so_fibnum;

        /*
         * Check for an existing connection attempt in syncache if
         * the flag is only ACK.  A successful lookup creates a new
         * socket appended to the listen queue in SYN_RECEIVED state.
         */
        if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
            /*
             * Parse the TCP options here because
             * syncookies need access to the reflected
             * timestamp.
             */
            tcp_dooptions(&to, optp, optlen, 0);
            /*
             * NB: syncache_expand() doesn't unlock inp.
             */
            rstreason = syncache_expand(&inc, &to, th, &so, m, port);
            if (rstreason < 0) {
                /*
                 * A failing TCP MD5 signature comparison
                 * must result in the segment being dropped
                 * and must not produce any response back
                 * to the sender.
                 */
                goto dropunlock;
            } else if (rstreason == 0) {
                /*
                 * No syncache entry, or ACK was not for our
                 * SYN/ACK.  Do our protection against double
                 * ACK.  If peer sent us 2 ACKs, then for the
                 * first one syncache_expand() successfully
                 * converted syncache entry into a socket,
                 * while we were waiting on the inpcb lock.  We
                 * don't want to send RST for the second ACK,
                 * so we perform a second lookup without wildcard
                 * match, hoping to find the new socket.  If
                 * the ACK is indeed stray, rstreason will
                 * hint to the code above that the lookup was a
                 * second attempt.
                 *
                 * NB: syncache did its own logging
                 * of the failure cause.
                 */
                INP_WUNLOCK(inp);
                rstreason = BANDLIM_RST_OPENPORT;
                lookupflag &= ~INPLOOKUP_WILDCARD;
                goto findpcb;
            }
tfo_socket_result:
            if (so == NULL) {
                /*
                 * We completed the 3-way handshake
                 * but could not allocate a socket
                 * either due to memory shortage,
                 * listen queue length limits or
                 * global socket limits.  Send RST
                 * or wait and have the remote end
                 * retransmit the ACK for another
                 * try.
                 */
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Socket allocation failed due to "
                        "limits or memory shortage, %s\n",
                        s, __func__,
                        V_tcp_sc_rst_sock_fail ?
                        "sending RST" : "try again");
                if (V_tcp_sc_rst_sock_fail) {
                    rstreason = BANDLIM_UNLIMITED;
                    goto dropwithreset;
                } else
                    goto dropunlock;
            }
            /*
             * Socket is created in state SYN_RECEIVED.
             * Unlock the listen socket, lock the newly
             * created socket and update the tp variable.
             * If we came here via jump to tfo_socket_result,
             * then listening socket is read-locked.
             */
            INP_UNLOCK(inp);	/* listen socket */
            inp = sotoinpcb(so);
            /*
             * New connection inpcb is already locked by
             * syncache_expand().
             */
            INP_WLOCK_ASSERT(inp);
            tp = intotcpcb(inp);
            KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
                ("%s: ", __func__));
            /*
             * Process the segment and the data it
             * contains.  tcp_do_segment() consumes
             * the mbuf chain and unlocks the inpcb.
             */
            TCP_PROBE5(receive, NULL, tp, m, tp, th);
            tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen,
                tlen, iptos);
            return (IPPROTO_DONE);
        }
        /*
         * Segment flag validation for new connection attempts:
         *
         * Our (SYN|ACK) response was rejected.
         * Check with syncache and remove entry to prevent
         * retransmits.
         *
         * NB: syncache_chkrst does its own logging of failure
         * causes.
         */
        if (thflags & TH_RST) {
            syncache_chkrst(&inc, th, m, port);
            goto dropunlock;
        }
        /*
         * We can't do anything without SYN.
         */
        if ((thflags & TH_SYN) == 0) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN is missing, segment ignored\n",
                    s, __func__);
            TCPSTAT_INC(tcps_badsyn);
            goto dropunlock;
        }
        /*
         * (SYN|ACK) is bogus on a listen socket.
         */
        if (thflags & TH_ACK) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN|ACK invalid, segment rejected\n",
                    s, __func__);
            syncache_badack(&inc, port);	/* XXX: Not needed! */
            TCPSTAT_INC(tcps_badsyn);
            rstreason = BANDLIM_RST_OPENPORT;
            goto dropwithreset;
        }
        /*
         * If the drop_synfin option is enabled, drop all
         * segments with both the SYN and FIN bits set.
         * This prevents e.g. nmap from identifying the
         * TCP/IP stack.
         * XXX: Poor reasoning.  nmap has other methods
         * and is constantly refining its stack detection
         * strategies.
         * XXX: This is a violation of the TCP specification
         * and was used by RFC1644.
         */
        if ((thflags & TH_FIN) && V_drop_synfin) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN|FIN segment ignored (based on "
                    "sysctl setting)\n", s, __func__);
            TCPSTAT_INC(tcps_badsyn);
            goto dropunlock;
        }
        /*
         * Segment's flags are (SYN) or (SYN|FIN).
         *
         * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
         * as they do not affect the state of the TCP FSM.
         * The data pointed to by TH_URG and th_urp is ignored.
         */
        KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
            ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
        KASSERT(thflags & (TH_SYN),
            ("%s: Listen socket: TH_SYN not set", __func__));
        INP_RLOCK_ASSERT(inp);
#ifdef INET6
        /*
         * If deprecated address is forbidden,
         * we do not accept SYN to deprecated interface
         * address to prevent any new inbound connection from
         * getting established.
         * When we do not accept SYN, we send a TCP RST,
         * with deprecated source address (instead of dropping
         * it).  We compromise it as it is much better for peer
         * to send a RST, and RST will be the final packet
         * for the exchange.
         *
         * If we do not forbid deprecated addresses, we accept
         * the SYN packet.  RFC2462 does not suggest dropping
         * SYN in this case.
         * If we decipher RFC2462 5.5.4, it says like this:
         *   1. use of deprecated addr with existing
         *	communication is okay - "SHOULD continue to be
         *	used"
         *   2. use of it with new communication:
         *	(2a) "SHOULD NOT be used if alternate address
         *	     with sufficient scope is available"
         *	(2b) nothing mentioned otherwise.
         * Here we fall into (2b) case as we have no choice in
         * our source address selection - we must obey the peer.
         *
         * The wording in RFC2462 is confusing, and there are
         * multiple description texts for deprecated address
         * handling - worse, they are not exactly the same.
         * I believe 5.5.4 is the best one, so we follow 5.5.4.
         */
        if (isipv6 && !V_ip6_use_deprecated) {
            struct in6_ifaddr *ia6;

            ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
            if (ia6 != NULL &&
                (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt to deprecated "
                        "IPv6 address rejected\n",
                        s, __func__);
                rstreason = BANDLIM_RST_OPENPORT;
                goto dropwithreset;
            }
        }
#endif /* INET6 */
        /*
         * Basic sanity checks on incoming SYN requests:
         *   Don't respond if the destination is a link layer
         *	broadcast according to RFC1122 4.2.3.10, p. 104.
         *   If it is from this socket it must be forged.
         *   Don't respond if the source or destination is a
         *	global or subnet broad- or multicast address.
         *   Note that it is quite possible to receive unicast
         *	link-layer packets with a broadcast IP address. Use
         *	in_broadcast() to find them.
         */
        if (m->m_flags & (M_BCAST|M_MCAST)) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "Connection attempt from broad- or multicast "
                    "link layer address ignored\n", s, __func__);
            goto dropunlock;
        }
#ifdef INET6
        if (isipv6) {
            if (th->th_dport == th->th_sport &&
                IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt to/from self "
                        "ignored\n", s, __func__);
                goto dropunlock;
            }
            if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
                IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to multicast "
                        "address ignored\n", s, __func__);
                goto dropunlock;
            }
        }
#endif
#if defined(INET) && defined(INET6)
        else
#endif
#ifdef INET
        {
            if (th->th_dport == th->th_sport &&
                ip->ip_dst.s_addr == ip->ip_src.s_addr) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to self "
                        "ignored\n", s, __func__);
                goto dropunlock;
            }
            if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
                IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
                ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
                in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to broad- "
                        "or multicast address ignored\n",
                        s, __func__);
                goto dropunlock;
            }
        }
#endif
        /*
         * SYN appears to be valid.  Create compressed TCP state
         * for syncache.
         */
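        /*
         * Illustrative note: syncache_add() below transmits the SYN|ACK
         * itself and normally consumes the mbuf; it only returns a
         * socket when a TCP Fast Open connection could be completed
         * immediately, which is then handled at tfo_socket_result above.
         */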
        TCP_PROBE3(debug__input, tp, th, m);
        tcp_dooptions(&to, optp, optlen, TO_SYN);
        if ((so = syncache_add(&inc, &to, th, inp, so, m, NULL, NULL,
            iptos, port)) != NULL)
            goto tfo_socket_result;

        /*
         * Entry added to syncache and mbuf consumed.
         * Only the listen socket is unlocked by syncache_add().
         */
        return (IPPROTO_DONE);
    } else if (tp->t_state == TCPS_LISTEN) {
        /*
         * When a listen socket is torn down the SO_ACCEPTCONN
         * flag is removed first while connections are drained
         * from the accept queue in an unlock/lock cycle of the
         * ACCEPT_LOCK, opening a race condition allowing a SYN
         * attempt to go through unhandled.
         */
        goto dropunlock;
    }
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
    if (tp->t_flags & TF_SIGNATURE) {
        tcp_dooptions(&to, optp, optlen, thflags);
        if ((to.to_flags & TOF_SIGNATURE) == 0) {
            TCPSTAT_INC(tcps_sig_err_nosigopt);
            goto dropunlock;
        }
        if (!TCPMD5_ENABLED() ||
            TCPMD5_INPUT(m, th, to.to_signature) != 0)
            goto dropunlock;
    }
#endif
    TCP_PROBE5(receive, NULL, tp, m, tp, th);

    /*
     * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
     * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
     * the inpcb, and unlocks pcbinfo.
     *
     * XXXGL: in case of a pure SYN arriving on existing connection
     * TCP stacks won't need to modify the PCB, they would either drop
     * the segment silently, or send a challenge ACK.  However, we try
     * to upgrade the lock, because calling convention for stacks is
     * write-lock on PCB.  If upgrade fails, drop the SYN.
     */
    if ((lookupflag & INPLOOKUP_RLOCKPCB) && INP_TRY_UPGRADE(inp) == 0)
        goto dropunlock;

    tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen, tlen, iptos);
    return (IPPROTO_DONE);

dropwithreset:
    TCP_PROBE5(receive, NULL, tp, m, tp, th);

    if (inp != NULL) {
        tcp_dropwithreset(m, th, tp, tlen, rstreason);
        INP_UNLOCK(inp);
    } else
        tcp_dropwithreset(m, th, NULL, tlen, rstreason);
    m = NULL;	/* mbuf chain got consumed. */
    goto drop;

dropunlock:
    if (m != NULL)
        TCP_PROBE5(receive, NULL, tp, m, tp, th);

    if (inp != NULL)
        INP_UNLOCK(inp);

drop:
    if (s != NULL)
        free(s, M_TCPLOG);
    if (m != NULL)
        m_freem(m);
    return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit it
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF.  Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. the number of bytes received during 1/2 of an sRTT
 *     is at least 3/8 of the current socket buffer size.
 *  3. receive buffer size has not hit maximal automatic size;
 *
 * If all of the criteria are met we increase the socket buffer
 * by a 1/2 (bounded by the max).  This allows us to keep ahead
 * of slow-start but also makes sure our peer never gets limited
 * by our rwnd, which we would then open up, causing a burst.
 *
 * This algorithm does two steps per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
    int newsize = 0;

    if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
        tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
        TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
        ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
        if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
            so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
            newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
        }
        TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

        /* Start over with next RTT. */
        tp->rfbuf_ts = 0;
        tp->rfbuf_cnt = 0;
    } else {
        tp->rfbuf_cnt += tlen;	/* add up */
    }
    return (newsize);
}
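
/*
 * Worked example (illustrative) for the step-up rule above: with a
 * 64 kB receive buffer, tcp_autorcvbuf() grows the buffer once at
 * least 24 kB (3/8 of 64 kB) arrive within half an sRTT, to
 * min(64 kB + 32 kB, net.inet.tcp.recvbuf_max) = 96 kB.
 */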

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
    return(tcp_input_with_port(mp, offp, proto, 0));
}

static void
tcp_handle_wakeup(struct tcpcb *tp)
{

    INP_WLOCK_ASSERT(tptoinpcb(tp));

    if (tp->t_flags & TF_WAKESOR) {
        struct socket *so = tptosocket(tp);

        tp->t_flags &= ~TF_WAKESOR;
        SOCKBUF_LOCK_ASSERT(&so->so_rcv);
        sorwakeup_locked(so);
    }
}

void
tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int drop_hdrlen, int tlen, uint8_t iptos)
{
    uint16_t thflags;
    int acked, ourfinisacked, needoutput = 0;
    sackstatus_t sack_changed;
    int rstreason, todrop, win, incforsyn = 0;
    uint32_t tiwin;
    uint16_t nsegs;
    char *s;
    struct inpcb *inp = tptoinpcb(tp);
    struct socket *so = tptosocket(tp);
    struct in_conninfo *inc = &inp->inp_inc;
    struct mbuf *mfree;
    struct tcpopt to;
    int tfo_syn;
    u_int maxseg;

    thflags = tcp_get_flags(th);
    tp->sackhint.last_sack_ack = 0;
    sack_changed = SACK_NOCHANGE;
    nsegs = max(1, m->m_pkthdr.lro_nsegs);

    NET_EPOCH_ASSERT();
    INP_WLOCK_ASSERT(inp);
    KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
        __func__));
    KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
        __func__));

#ifdef TCPPCAP
    /* Save segment, if requested. */
    tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif
    TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
        tlen, NULL, true);

    if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
        if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
            log(LOG_DEBUG, "%s; %s: "
                "SYN|FIN segment ignored (based on "
                "sysctl setting)\n", s, __func__);
            free(s, M_TCPLOG);
        }
        goto drop;
    }

    /*
     * If a segment with the ACK-bit set arrives in the SYN-SENT state
     * check SEQ.ACK first.
     */
    if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
        (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
        rstreason = BANDLIM_UNLIMITED;
        tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
        goto dropwithreset;
    }

    /*
     * Segment received on connection.
     * Reset idle time and keep-alive timer.
     * XXX: This should be done after segment
     * validation to ignore broken/spoofed segs.
     */
    if (tp->t_idle_reduce &&
        (tp->snd_max == tp->snd_una) &&
        ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
        cc_after_idle(tp);
    tp->t_rcvtime = ticks;

    if (thflags & TH_FIN)
        tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
    /*
     * Scale up the window into a 32-bit value.
     * For the SYN_SENT state the scale is zero.
     */
    tiwin = th->th_win << tp->snd_scale;
#ifdef STATS
    stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif

    /*
     * TCP ECN processing.
     */
    if (tcp_ecn_input_segment(tp, thflags, tlen,
        tcp_packets_this_ack(tp, th->th_ack),
        iptos))
        cc_cong_signal(tp, th, CC_ECN);

    /*
     * Parse options on any incoming segment.
     */
    tcp_dooptions(&to, (u_char *)(th + 1),
        (th->th_off << 2) - sizeof(struct tcphdr),
        (thflags & TH_SYN) ? TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
    if ((tp->t_flags & TF_SIGNATURE) != 0 &&
        (to.to_flags & TOF_SIGNATURE) == 0) {
        TCPSTAT_INC(tcps_sig_err_sigopt);
        /* XXX: should drop? */
    }
#endif
    /*
     * If echoed timestamp is later than the current time,
     * fall back to non RFC1323 RTT calculation.  Normalize
     * timestamp if syncookies were used when this connection
     * was established.
     */
    if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
        to.to_tsecr -= tp->ts_offset;
        if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
            to.to_tsecr = 0;
        else if (tp->t_rxtshift == 1 &&
            tp->t_flags & TF_PREVVALID &&
            tp->t_badrxtwin != 0 &&
            TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
            cc_cong_signal(tp, th, CC_RTO_ERR);
    }
    /*
     * Process options only when we get SYN/ACK back.  The SYN case
     * for incoming connections is handled in tcp_syncache.
     * According to RFC1323 the window field in a SYN (i.e., a <SYN>
     * or <SYN,ACK>) segment itself is never scaled.
     * XXX this is traditional behavior, may need to be cleaned up.
     */
    if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
        /* Handle parallel SYN for ECN */
        tcp_ecn_input_parallel_syn(tp, thflags, iptos);
        if ((to.to_flags & TOF_SCALE) &&
            (tp->t_flags & TF_REQ_SCALE) &&
            !(tp->t_flags & TF_NOOPT)) {
            tp->t_flags |= TF_RCVD_SCALE;
            tp->snd_scale = to.to_wscale;
        } else
            tp->t_flags &= ~TF_REQ_SCALE;
        /*
         * Initial send window.  It will be updated with
         * the next incoming segment to the scaled value.
         */
        tp->snd_wnd = th->th_win;
        if ((to.to_flags & TOF_TS) &&
            (tp->t_flags & TF_REQ_TSTMP) &&
            !(tp->t_flags & TF_NOOPT)) {
            tp->t_flags |= TF_RCVD_TSTMP;
            tp->ts_recent = to.to_tsval;
            tp->ts_recent_age = tcp_ts_getticks();
        } else
            tp->t_flags &= ~TF_REQ_TSTMP;
        if (to.to_flags & TOF_MSS)
            tcp_mss(tp, to.to_mss);
        if ((tp->t_flags & TF_SACK_PERMIT) &&
            (!(to.to_flags & TOF_SACKPERM) ||
            (tp->t_flags & TF_NOOPT)))
            tp->t_flags &= ~TF_SACK_PERMIT;
        if (IS_FASTOPEN(tp->t_flags)) {
            if ((to.to_flags & TOF_FASTOPEN) &&
                !(tp->t_flags & TF_NOOPT)) {
                uint16_t mss;

                if (to.to_flags & TOF_MSS)
                    mss = to.to_mss;
                else
                    if ((inp->inp_vflag & INP_IPV6) != 0)
                        mss = TCP6_MSS;
                    else
                        mss = TCP_MSS;
                tcp_fastopen_update_cache(tp, mss,
                    to.to_tfo_len, to.to_tfo_cookie);
            } else
                tcp_fastopen_disable_path(tp);
        }
    }

    /*
     * If timestamps were negotiated during SYN/ACK and a
     * segment without a timestamp is received, silently drop
     * the segment, unless it is a RST segment or missing timestamps are
     * tolerated.
     * See section 3.2 of RFC 7323.
     */
    if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
        if (((thflags & TH_RST) != 0) || V_tcp_tolerate_missing_ts) {
            if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
                log(LOG_DEBUG, "%s; %s: Timestamp missing, "
                    "segment processed normally\n",
                    s, __func__);
                free(s, M_TCPLOG);
            }
        } else {
            if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
                log(LOG_DEBUG, "%s; %s: Timestamp missing, "
                    "segment silently dropped\n", s, __func__);
                free(s, M_TCPLOG);
            }
            goto drop;
        }
    }
    /*
     * If timestamps were not negotiated during SYN/ACK and a
     * segment with a timestamp is received, ignore the
     * timestamp and process the packet normally.
     * See section 3.2 of RFC 7323.
     */
    if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
        if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
            log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
                "segment processed normally\n", s, __func__);
            free(s, M_TCPLOG);
        }
    }

    /*
     * Header prediction: check for the two common cases
     * of a uni-directional data xfer.  If the packet has
     * no control flags, is in-sequence, the window didn't
     * change and we're not retransmitting, it's a
     * candidate.  If the length is zero and the ack moved
     * forward, we're the sender side of the xfer.  Just
     * free the data acked & wake any higher level process
     * that was blocked waiting for space.  If the length
     * is non-zero and the ack didn't move, we're the
     * receiver side.  If we're getting packets in-order
     * (the reassembly queue is empty), add the data to
     * the socket buffer and note that we need a delayed ack.
     * Make sure that the hidden state-flags are also off.
     * Since we check for TCPS_ESTABLISHED first, it can only
     * be TH_NEEDSYN.
     */
1739 */ 1740 if (tp->t_state == TCPS_ESTABLISHED && 1741 th->th_seq == tp->rcv_nxt && 1742 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1743 tp->snd_nxt == tp->snd_max && 1744 tiwin && tiwin == tp->snd_wnd && 1745 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1746 SEGQ_EMPTY(tp) && 1747 ((to.to_flags & TOF_TS) == 0 || 1748 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1749 /* 1750 * If last ACK falls within this segment's sequence numbers, 1751 * record the timestamp. 1752 * NOTE that the test is modified according to the latest 1753 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1754 */ 1755 if ((to.to_flags & TOF_TS) != 0 && 1756 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1757 tp->ts_recent_age = tcp_ts_getticks(); 1758 tp->ts_recent = to.to_tsval; 1759 } 1760 1761 if (tlen == 0) { 1762 if (SEQ_GT(th->th_ack, tp->snd_una) && 1763 SEQ_LEQ(th->th_ack, tp->snd_max) && 1764 !IN_RECOVERY(tp->t_flags) && 1765 (to.to_flags & TOF_SACK) == 0 && 1766 TAILQ_EMPTY(&tp->snd_holes)) { 1767 /* 1768 * This is a pure ack for outstanding data. 1769 */ 1770 TCPSTAT_INC(tcps_predack); 1771 1772 /* 1773 * "bad retransmit" recovery without timestamps. 1774 */ 1775 if ((to.to_flags & TOF_TS) == 0 && 1776 tp->t_rxtshift == 1 && 1777 tp->t_flags & TF_PREVVALID && 1778 tp->t_badrxtwin != 0 && 1779 TSTMP_LT(ticks, tp->t_badrxtwin)) { 1780 cc_cong_signal(tp, th, CC_RTO_ERR); 1781 } 1782 1783 /* 1784 * Recalculate the transmit timer / rtt. 1785 * 1786 * Some boxes send broken timestamp replies 1787 * during the SYN+ACK phase, ignore 1788 * timestamps of 0 or we could calculate a 1789 * huge RTT and blow up the retransmit timer. 1790 */ 1791 if ((to.to_flags & TOF_TS) != 0 && 1792 to.to_tsecr) { 1793 uint32_t t; 1794 1795 t = tcp_ts_getticks() - to.to_tsecr; 1796 if (!tp->t_rttlow || tp->t_rttlow > t) 1797 tp->t_rttlow = t; 1798 tcp_xmit_timer(tp, 1799 TCP_TS_TO_TICKS(t) + 1); 1800 } else if (tp->t_rtttime && 1801 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1802 if (!tp->t_rttlow || 1803 tp->t_rttlow > ticks - tp->t_rtttime) 1804 tp->t_rttlow = ticks - tp->t_rtttime; 1805 tcp_xmit_timer(tp, 1806 ticks - tp->t_rtttime); 1807 } 1808 acked = BYTES_THIS_ACK(tp, th); 1809 1810 #ifdef TCP_HHOOK 1811 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 1812 hhook_run_tcp_est_in(tp, th, &to); 1813 #endif 1814 1815 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 1816 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1817 sbdrop(&so->so_snd, acked); 1818 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1819 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1820 tp->snd_recover = th->th_ack - 1; 1821 1822 /* 1823 * Let the congestion control algorithm update 1824 * congestion control related information. This 1825 * typically means increasing the congestion 1826 * window. 1827 */ 1828 cc_ack_received(tp, th, nsegs, CC_ACK); 1829 1830 tp->snd_una = th->th_ack; 1831 /* 1832 * Pull snd_wl2 up to prevent seq wrap relative 1833 * to th_ack. 1834 */ 1835 tp->snd_wl2 = th->th_ack; 1836 tp->t_dupacks = 0; 1837 m_freem(m); 1838 1839 /* 1840 * If all outstanding data are acked, stop 1841 * retransmit timer, otherwise restart timer 1842 * using current (possibly backed-off) value. 1843 * If process is waiting for space, 1844 * wakeup/selwakeup/signal. If data 1845 * are ready to send, let tcp_output 1846 * decide between more output or persist. 1847 */ 1848 TCP_PROBE3(debug__input, tp, th, m); 1849 /* 1850 * Clear t_acktime if remote side has ACKd 1851 * all data in the socket buffer. 
1852 * Otherwise, update t_acktime if we received 1853 * a sufficiently large ACK. 1854 */ 1855 if (sbavail(&so->so_snd) == 0) 1856 tp->t_acktime = 0; 1857 else if (acked > 1) 1858 tp->t_acktime = ticks; 1859 if (tp->snd_una == tp->snd_max) 1860 tcp_timer_activate(tp, TT_REXMT, 0); 1861 else if (!tcp_timer_active(tp, TT_PERSIST)) 1862 tcp_timer_activate(tp, TT_REXMT, 1863 TP_RXTCUR(tp)); 1864 sowwakeup(so); 1865 if (sbavail(&so->so_snd)) 1866 (void) tcp_output(tp); 1867 goto check_delack; 1868 } 1869 } else if (th->th_ack == tp->snd_una && 1870 tlen <= sbspace(&so->so_rcv)) { 1871 int newsize = 0; /* automatic sockbuf scaling */ 1872 1873 /* 1874 * This is a pure, in-sequence data packet with 1875 * nothing on the reassembly queue and we have enough 1876 * buffer space to take it. 1877 */ 1878 /* Clean receiver SACK report if present */ 1879 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1880 tcp_clean_sackreport(tp); 1881 TCPSTAT_INC(tcps_preddat); 1882 tp->rcv_nxt += tlen; 1883 if (tlen && 1884 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 1885 (tp->t_fbyte_in == 0)) { 1886 tp->t_fbyte_in = ticks; 1887 if (tp->t_fbyte_in == 0) 1888 tp->t_fbyte_in = 1; 1889 if (tp->t_fbyte_out && tp->t_fbyte_in) 1890 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 1891 } 1892 /* 1893 * Pull snd_wl1 up to prevent seq wrap relative to 1894 * th_seq. 1895 */ 1896 tp->snd_wl1 = th->th_seq; 1897 /* 1898 * Pull rcv_up up to prevent seq wrap relative to 1899 * rcv_nxt. 1900 */ 1901 tp->rcv_up = tp->rcv_nxt; 1902 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1903 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1904 TCP_PROBE3(debug__input, tp, th, m); 1905 1906 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1907 1908 /* Add data to socket buffer. */ 1909 SOCKBUF_LOCK(&so->so_rcv); 1910 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1911 m_freem(m); 1912 } else { 1913 /* 1914 * Set new socket buffer size. 1915 * Give up when limit is reached. 1916 */ 1917 if (newsize) 1918 if (!sbreserve_locked(so, SO_RCV, 1919 newsize, NULL)) 1920 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1921 m_adj(m, drop_hdrlen); /* delayed header drop */ 1922 sbappendstream_locked(&so->so_rcv, m, 0); 1923 } 1924 /* NB: sorwakeup_locked() does an implicit unlock. */ 1925 sorwakeup_locked(so); 1926 if (DELAY_ACK(tp, tlen)) { 1927 tp->t_flags |= TF_DELACK; 1928 } else { 1929 tp->t_flags |= TF_ACKNOW; 1930 tcp_output(tp); 1931 } 1932 goto check_delack; 1933 } 1934 } 1935 1936 /* 1937 * Calculate amount of space in receive window, 1938 * and then do TCP input processing. 1939 * Receive window is amount of space in rcv queue, 1940 * but not less than advertised window. 1941 */ 1942 win = sbspace(&so->so_rcv); 1943 if (win < 0) 1944 win = 0; 1945 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1946 1947 switch (tp->t_state) { 1948 /* 1949 * If the state is SYN_RECEIVED: 1950 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1951 */ 1952 case TCPS_SYN_RECEIVED: 1953 if (thflags & TH_RST) { 1954 /* Handle RST segments later. */ 1955 break; 1956 } 1957 if ((thflags & TH_ACK) && 1958 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1959 SEQ_GT(th->th_ack, tp->snd_max))) { 1960 rstreason = BANDLIM_RST_OPENPORT; 1961 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 1962 goto dropwithreset; 1963 } 1964 if (IS_FASTOPEN(tp->t_flags)) { 1965 /* 1966 * When a TFO connection is in SYN_RECEIVED, the 1967 * only valid packets are the initial SYN, a 1968 * retransmit/copy of the initial SYN (possibly with 1969 * a subset of the original data), a valid ACK, a 1970 * FIN, or a RST. 
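 * In particular a SYN|ACK is never legitimate here and is answered
 * with a RST below, while a bare retransmitted SYN is simply dropped
 * as long as the retransmit or delayed-ACK timer is still pending.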
1971 */ 1972 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1973 rstreason = BANDLIM_RST_OPENPORT; 1974 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 1975 goto dropwithreset; 1976 } else if (thflags & TH_SYN) { 1977 /* non-initial SYN is ignored */ 1978 if ((tcp_timer_active(tp, TT_DELACK) || 1979 tcp_timer_active(tp, TT_REXMT))) 1980 goto drop; 1981 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1982 goto drop; 1983 } 1984 } 1985 break; 1986 1987 /* 1988 * If the state is SYN_SENT: 1989 * if seg contains a RST with valid ACK (SEQ.ACK has already 1990 * been verified), then drop the connection. 1991 * if seg contains a RST without an ACK, drop the seg. 1992 * if seg does not contain SYN, then drop the seg. 1993 * Otherwise this is an acceptable SYN segment 1994 * initialize tp->rcv_nxt and tp->irs 1995 * if seg contains ack then advance tp->snd_una 1996 * if seg contains an ECE and ECN support is enabled, the stream 1997 * is ECN capable. 1998 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1999 * arrange for segment to be acked (eventually) 2000 * continue processing rest of data/controls, beginning with URG 2001 */ 2002 case TCPS_SYN_SENT: 2003 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 2004 TCP_PROBE5(connect__refused, NULL, tp, 2005 m, tp, th); 2006 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 2007 tp = tcp_drop(tp, ECONNREFUSED); 2008 } 2009 if (thflags & TH_RST) 2010 goto drop; 2011 if (!(thflags & TH_SYN)) 2012 goto drop; 2013 2014 tp->irs = th->th_seq; 2015 tcp_rcvseqinit(tp); 2016 if (thflags & TH_ACK) { 2017 int tfo_partial_ack = 0; 2018 2019 TCPSTAT_INC(tcps_connects); 2020 soisconnected(so); 2021 #ifdef MAC 2022 mac_socketpeer_set_from_mbuf(m, so); 2023 #endif 2024 /* Do window scaling on this connection? */ 2025 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2026 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2027 tp->rcv_scale = tp->request_r_scale; 2028 } 2029 tp->rcv_adv += min(tp->rcv_wnd, 2030 TCP_MAXWIN << tp->rcv_scale); 2031 tp->snd_una++; /* SYN is acked */ 2032 /* 2033 * If not all the data that was sent in the TFO SYN 2034 * has been acked, resend the remainder right away. 2035 */ 2036 if (IS_FASTOPEN(tp->t_flags) && 2037 (tp->snd_una != tp->snd_max)) { 2038 tp->snd_nxt = th->th_ack; 2039 tfo_partial_ack = 1; 2040 } 2041 /* 2042 * If there's data, delay ACK; if there's also a FIN 2043 * ACKNOW will be turned on later. 2044 */ 2045 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 2046 tcp_timer_activate(tp, TT_DELACK, 2047 tcp_delacktime); 2048 else 2049 tp->t_flags |= TF_ACKNOW; 2050 2051 tcp_ecn_input_syn_sent(tp, thflags, iptos); 2052 2053 /* 2054 * Received <SYN,ACK> in SYN_SENT[*] state. 2055 * Transitions: 2056 * SYN_SENT --> ESTABLISHED 2057 * SYN_SENT* --> FIN_WAIT_1 2058 */ 2059 tp->t_starttime = ticks; 2060 if (tp->t_flags & TF_NEEDFIN) { 2061 tp->t_acktime = ticks; 2062 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2063 tp->t_flags &= ~TF_NEEDFIN; 2064 thflags &= ~TH_SYN; 2065 } else { 2066 tcp_state_change(tp, TCPS_ESTABLISHED); 2067 TCP_PROBE5(connect__established, NULL, tp, 2068 m, tp, th); 2069 cc_conn_init(tp); 2070 tcp_timer_activate(tp, TT_KEEP, 2071 TP_KEEPIDLE(tp)); 2072 } 2073 } else { 2074 /* 2075 * Received initial SYN in SYN-SENT[*] state => 2076 * simultaneous open. 2077 * If it succeeds, connection is * half-synchronized. 
2078 * Otherwise, do 3-way handshake: 2079 * SYN-SENT -> SYN-RECEIVED 2080 * SYN-SENT* -> SYN-RECEIVED* 2081 */ 2082 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 2083 tcp_timer_activate(tp, TT_REXMT, 0); 2084 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2085 } 2086 2087 /* 2088 * Advance th->th_seq to correspond to first data byte. 2089 * If data, trim to stay within window, 2090 * dropping FIN if necessary. 2091 */ 2092 th->th_seq++; 2093 if (tlen > tp->rcv_wnd) { 2094 todrop = tlen - tp->rcv_wnd; 2095 m_adj(m, -todrop); 2096 tlen = tp->rcv_wnd; 2097 thflags &= ~TH_FIN; 2098 TCPSTAT_INC(tcps_rcvpackafterwin); 2099 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2100 } 2101 tp->snd_wl1 = th->th_seq - 1; 2102 tp->rcv_up = th->th_seq; 2103 /* 2104 * Client side of transaction: already sent SYN and data. 2105 * If the remote host used T/TCP to validate the SYN, 2106 * our data will be ACK'd; if so, enter normal data segment 2107 * processing in the middle of step 5, ack processing. 2108 * Otherwise, goto step 6. 2109 */ 2110 if (thflags & TH_ACK) 2111 goto process_ACK; 2112 2113 goto step6; 2114 } 2115 2116 /* 2117 * States other than LISTEN or SYN_SENT. 2118 * First check the RST flag and sequence number since reset segments 2119 * are exempt from the timestamp and connection count tests. This 2120 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2121 * below which allowed reset segments in half the sequence space 2122 * to fall though and be processed (which gives forged reset 2123 * segments with a random sequence number a 50 percent chance of 2124 * killing a connection). 2125 * Then check timestamp, if present. 2126 * Then check the connection count, if present. 2127 * Then check that at least some bytes of segment are within 2128 * receive window. If segment begins before rcv_nxt, 2129 * drop leading data (and SYN); if nothing left, just ack. 2130 */ 2131 if (thflags & TH_RST) { 2132 /* 2133 * RFC5961 Section 3.2 2134 * 2135 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2136 * - If RST is in window, we send challenge ACK. 2137 * 2138 * Note: to take into account delayed ACKs, we should 2139 * test against last_ack_sent instead of rcv_nxt. 2140 * Note 2: we handle special case of closed window, not 2141 * covered by the RFC. 2142 */ 2143 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2144 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2145 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2146 KASSERT(tp->t_state != TCPS_SYN_SENT, 2147 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2148 __func__, th, tp)); 2149 2150 if (V_tcp_insecure_rst || 2151 tp->last_ack_sent == th->th_seq) { 2152 TCPSTAT_INC(tcps_drops); 2153 /* Drop the connection. */ 2154 switch (tp->t_state) { 2155 case TCPS_SYN_RECEIVED: 2156 so->so_error = ECONNREFUSED; 2157 goto close; 2158 case TCPS_ESTABLISHED: 2159 case TCPS_FIN_WAIT_1: 2160 case TCPS_FIN_WAIT_2: 2161 case TCPS_CLOSE_WAIT: 2162 case TCPS_CLOSING: 2163 case TCPS_LAST_ACK: 2164 so->so_error = ECONNRESET; 2165 close: 2166 /* FALLTHROUGH */ 2167 default: 2168 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_RST); 2169 tp = tcp_close(tp); 2170 } 2171 } else { 2172 TCPSTAT_INC(tcps_badrst); 2173 /* Send challenge ACK. */ 2174 tcp_respond(tp, mtod(m, void *), th, m, 2175 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2176 tp->last_ack_sent = tp->rcv_nxt; 2177 m = NULL; 2178 } 2179 } 2180 goto drop; 2181 } 2182 2183 /* 2184 * RFC5961 Section 4.2 2185 * Send challenge ACK for any SYN in synchronized state. 
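 * The challenge ACK merely re-advertises rcv_nxt and snd_nxt; a peer
 * that really lost its connection state will answer it with an
 * in-window RST, while a blind attacker learns nothing from it.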
2186 */ 2187 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2188 tp->t_state != TCPS_SYN_RECEIVED) { 2189 TCPSTAT_INC(tcps_badsyn); 2190 if (V_tcp_insecure_syn && 2191 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2192 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2193 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 2194 tp = tcp_drop(tp, ECONNRESET); 2195 rstreason = BANDLIM_UNLIMITED; 2196 } else { 2197 tcp_ecn_input_syn_sent(tp, thflags, iptos); 2198 /* Send challenge ACK. */ 2199 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2200 tp->snd_nxt, TH_ACK); 2201 tp->last_ack_sent = tp->rcv_nxt; 2202 m = NULL; 2203 } 2204 goto drop; 2205 } 2206 2207 /* 2208 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2209 * and it's less than ts_recent, drop it. 2210 */ 2211 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2212 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2213 /* Check to see if ts_recent is over 24 days old. */ 2214 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2215 /* 2216 * Invalidate ts_recent. If this segment updates 2217 * ts_recent, the age will be reset later and ts_recent 2218 * will get a valid value. If it does not, setting 2219 * ts_recent to zero will at least satisfy the 2220 * requirement that zero be placed in the timestamp 2221 * echo reply when ts_recent isn't valid. The 2222 * age isn't reset until we get a valid ts_recent 2223 * because we don't want out-of-order segments to be 2224 * dropped when ts_recent is old. 2225 */ 2226 tp->ts_recent = 0; 2227 } else { 2228 TCPSTAT_INC(tcps_rcvduppack); 2229 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2230 TCPSTAT_INC(tcps_pawsdrop); 2231 if (tlen) 2232 goto dropafterack; 2233 goto drop; 2234 } 2235 } 2236 2237 /* 2238 * In the SYN-RECEIVED state, validate that the packet belongs to 2239 * this connection before trimming the data to fit the receive 2240 * window. Check the sequence number versus IRS since we know 2241 * the sequence numbers haven't wrapped. This is a partial fix 2242 * for the "LAND" DoS attack. 2243 */ 2244 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2245 rstreason = BANDLIM_RST_OPENPORT; 2246 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 2247 goto dropwithreset; 2248 } 2249 2250 todrop = tp->rcv_nxt - th->th_seq; 2251 if (todrop > 0) { 2252 if (thflags & TH_SYN) { 2253 thflags &= ~TH_SYN; 2254 th->th_seq++; 2255 if (th->th_urp > 1) 2256 th->th_urp--; 2257 else 2258 thflags &= ~TH_URG; 2259 todrop--; 2260 } 2261 /* 2262 * Following if statement from Stevens, vol. 2, p. 960. 2263 */ 2264 if (todrop > tlen 2265 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2266 /* 2267 * Any valid FIN must be to the left of the window. 2268 * At this point the FIN must be a duplicate or out 2269 * of sequence; drop it. 2270 */ 2271 thflags &= ~TH_FIN; 2272 2273 /* 2274 * Send an ACK to resynchronize and drop any data. 2275 * But keep on processing for RST or ACK. 
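 * For example, with rcv_nxt = 1000, th_seq = 900 and tlen = 50,
 * todrop is 100 and exceeds tlen, so this branch is taken: every byte
 * of the segment is old data and only its RST or ACK bits remain of
 * interest.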
2276 */ 2277 tp->t_flags |= TF_ACKNOW; 2278 todrop = tlen; 2279 TCPSTAT_INC(tcps_rcvduppack); 2280 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2281 } else { 2282 TCPSTAT_INC(tcps_rcvpartduppack); 2283 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2284 } 2285 /* 2286 * DSACK - add SACK block for dropped range 2287 */ 2288 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) { 2289 tcp_update_sack_list(tp, th->th_seq, 2290 th->th_seq + todrop); 2291 /* 2292 * ACK now, as the next in-sequence segment 2293 * will clear the DSACK block again 2294 */ 2295 tp->t_flags |= TF_ACKNOW; 2296 } 2297 drop_hdrlen += todrop; /* drop from the top afterwards */ 2298 th->th_seq += todrop; 2299 tlen -= todrop; 2300 if (th->th_urp > todrop) 2301 th->th_urp -= todrop; 2302 else { 2303 thflags &= ~TH_URG; 2304 th->th_urp = 0; 2305 } 2306 } 2307 2308 /* 2309 * If new data are received on a connection after the 2310 * user processes are gone, then RST the other end. 2311 */ 2312 if ((tp->t_flags & TF_CLOSED) && tlen) { 2313 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2314 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2315 "after socket was closed, " 2316 "sending RST and removing tcpcb\n", 2317 s, __func__, tcpstates[tp->t_state], tlen); 2318 free(s, M_TCPLOG); 2319 } 2320 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 2321 /* tcp_close will kill the inp pre-log the Reset */ 2322 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 2323 tp = tcp_close(tp); 2324 TCPSTAT_INC(tcps_rcvafterclose); 2325 rstreason = BANDLIM_UNLIMITED; 2326 goto dropwithreset; 2327 } 2328 2329 /* 2330 * If segment ends after window, drop trailing data 2331 * (and PUSH and FIN); if nothing left, just ACK. 2332 */ 2333 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2334 if (todrop > 0) { 2335 TCPSTAT_INC(tcps_rcvpackafterwin); 2336 if (todrop >= tlen) { 2337 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2338 /* 2339 * If window is closed can only take segments at 2340 * window edge, and have to drop data and PUSH from 2341 * incoming segments. Continue processing, but 2342 * remember to ack. Otherwise, drop segment 2343 * and ack. 2344 */ 2345 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2346 tp->t_flags |= TF_ACKNOW; 2347 TCPSTAT_INC(tcps_rcvwinprobe); 2348 } else 2349 goto dropafterack; 2350 } else 2351 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2352 m_adj(m, -todrop); 2353 tlen -= todrop; 2354 thflags &= ~(TH_PUSH|TH_FIN); 2355 } 2356 2357 /* 2358 * If last ACK falls within this segment's sequence numbers, 2359 * record its timestamp. 2360 * NOTE: 2361 * 1) That the test incorporates suggestions from the latest 2362 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2363 * 2) That updating only on newer timestamps interferes with 2364 * our earlier PAWS tests, so this check should be solely 2365 * predicated on the sequence space of this segment. 2366 * 3) That we modify the segment boundary check to be 2367 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2368 * instead of RFC1323's 2369 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2370 * This modified check allows us to overcome RFC1323's 2371 * limitations as described in Stevens TCP/IP Illustrated 2372 * Vol. 2 p.869. In such cases, we can still calculate the 2373 * RTT correctly when RCV.NXT == Last.ACK.Sent. 
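 * For example, an in-order data segment normally arrives with SEG.SEQ
 * equal to Last.ACK.Sent, so both comparisons below hold and ts_recent
 * is taken from it even if its tsval is no newer than the stored one.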
2374 */ 2375 if ((to.to_flags & TOF_TS) != 0 && 2376 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2377 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2378 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2379 tp->ts_recent_age = tcp_ts_getticks(); 2380 tp->ts_recent = to.to_tsval; 2381 } 2382 2383 /* 2384 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2385 * flag is on (half-synchronized state), then queue data for 2386 * later processing; else drop segment and return. 2387 */ 2388 if ((thflags & TH_ACK) == 0) { 2389 if (tp->t_state == TCPS_SYN_RECEIVED || 2390 (tp->t_flags & TF_NEEDSYN)) { 2391 if (tp->t_state == TCPS_SYN_RECEIVED && 2392 IS_FASTOPEN(tp->t_flags)) { 2393 tp->snd_wnd = tiwin; 2394 cc_conn_init(tp); 2395 } 2396 goto step6; 2397 } else if (tp->t_flags & TF_ACKNOW) 2398 goto dropafterack; 2399 else 2400 goto drop; 2401 } 2402 2403 /* 2404 * Ack processing. 2405 */ 2406 switch (tp->t_state) { 2407 /* 2408 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2409 * ESTABLISHED state and continue processing. 2410 * The ACK was checked above. 2411 */ 2412 case TCPS_SYN_RECEIVED: 2413 2414 TCPSTAT_INC(tcps_connects); 2415 if (tp->t_flags & TF_SONOTCONN) { 2416 /* 2417 * Usually SYN_RECEIVED had been created from a LISTEN, 2418 * and solisten_enqueue() has already marked the socket 2419 * layer as connected. If it didn't, which can happen 2420 * only with an accept_filter(9), then the tp is marked 2421 * with TF_SONOTCONN. The other reason for this mark 2422 * to be set is a simultaneous open, a SYN_RECEIVED 2423 * that had been created from SYN_SENT. 2424 */ 2425 tp->t_flags &= ~TF_SONOTCONN; 2426 soisconnected(so); 2427 } 2428 /* Do window scaling? */ 2429 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2430 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2431 tp->rcv_scale = tp->request_r_scale; 2432 } 2433 tp->snd_wnd = tiwin; 2434 /* 2435 * Make transitions: 2436 * SYN-RECEIVED -> ESTABLISHED 2437 * SYN-RECEIVED* -> FIN-WAIT-1 2438 */ 2439 tp->t_starttime = ticks; 2440 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2441 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2442 tp->t_tfo_pending = NULL; 2443 } 2444 if (tp->t_flags & TF_NEEDFIN) { 2445 tp->t_acktime = ticks; 2446 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2447 tp->t_flags &= ~TF_NEEDFIN; 2448 } else { 2449 tcp_state_change(tp, TCPS_ESTABLISHED); 2450 TCP_PROBE5(accept__established, NULL, tp, 2451 m, tp, th); 2452 /* 2453 * TFO connections call cc_conn_init() during SYN 2454 * processing. Calling it again here for such 2455 * connections is not harmless as it would undo the 2456 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2457 * is retransmitted. 2458 */ 2459 if (!IS_FASTOPEN(tp->t_flags)) 2460 cc_conn_init(tp); 2461 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2462 } 2463 /* 2464 * Account for the ACK of our SYN prior to 2465 * regular ACK processing below, except for 2466 * simultaneous SYN, which is handled later. 2467 */ 2468 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 2469 incforsyn = 1; 2470 /* 2471 * If segment contains data or ACK, will call tcp_reass() 2472 * later; if not, do so now to pass queued data to user. 2473 */ 2474 if (tlen == 0 && (thflags & TH_FIN) == 0) { 2475 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2476 (struct mbuf *)0); 2477 tcp_handle_wakeup(tp); 2478 } 2479 tp->snd_wl1 = th->th_seq - 1; 2480 /* FALLTHROUGH */ 2481 2482 /* 2483 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2484 * ACKs. 
If the ack is in the range 2485 * tp->snd_una < th->th_ack <= tp->snd_max 2486 * then advance tp->snd_una to th->th_ack and drop 2487 * data from the retransmission queue. If this ACK reflects 2488 * more up to date window information we update our window information. 2489 */ 2490 case TCPS_ESTABLISHED: 2491 case TCPS_FIN_WAIT_1: 2492 case TCPS_FIN_WAIT_2: 2493 case TCPS_CLOSE_WAIT: 2494 case TCPS_CLOSING: 2495 case TCPS_LAST_ACK: 2496 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2497 TCPSTAT_INC(tcps_rcvacktoomuch); 2498 goto dropafterack; 2499 } 2500 if (tcp_is_sack_recovery(tp, &to)) { 2501 if (((sack_changed = tcp_sack_doack(tp, &to, th->th_ack)) != 0) && 2502 (tp->t_flags & TF_LRD)) { 2503 tcp_sack_lost_retransmission(tp, th); 2504 } 2505 } else 2506 /* 2507 * Reset the value so that previous (valid) value 2508 * from the last ack with SACK doesn't get used. 2509 */ 2510 tp->sackhint.sacked_bytes = 0; 2511 2512 #ifdef TCP_HHOOK 2513 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2514 hhook_run_tcp_est_in(tp, th, &to); 2515 #endif 2516 2517 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2518 maxseg = tcp_maxseg(tp); 2519 if (tlen == 0 && 2520 (tiwin == tp->snd_wnd || 2521 (tp->t_flags & TF_SACK_PERMIT))) { 2522 /* 2523 * If this is the first time we've seen a 2524 * FIN from the remote, this is not a 2525 * duplicate and it needs to be processed 2526 * normally. This happens during a 2527 * simultaneous close. 2528 */ 2529 if ((thflags & TH_FIN) && 2530 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2531 tp->t_dupacks = 0; 2532 break; 2533 } 2534 TCPSTAT_INC(tcps_rcvdupack); 2535 /* 2536 * If we have outstanding data (other than 2537 * a window probe), this is a completely 2538 * duplicate ack (ie, window info didn't 2539 * change and FIN isn't set), 2540 * the ack is the biggest we've 2541 * seen and we've seen exactly our rexmt 2542 * threshold of them, assume a packet 2543 * has been dropped and retransmit it. 2544 * Kludge snd_nxt & the congestion 2545 * window so we send only this one 2546 * packet. 2547 * 2548 * We know we're losing at the current 2549 * window size so do congestion avoidance 2550 * (set ssthresh to half the current window 2551 * and pull our congestion window back to 2552 * the new ssthresh). 2553 * 2554 * Dup acks mean that packets have left the 2555 * network (they're now cached at the receiver) 2556 * so bump cwnd by the amount in the receiver 2557 * to keep a constant cwnd packets in the 2558 * network. 2559 * 2560 * When using TCP ECN, notify the peer that 2561 * we reduced the cwnd. 2562 */ 2563 /* 2564 * Following 2 kinds of acks should not affect 2565 * dupack counting: 2566 * 1) Old acks 2567 * 2) Acks with SACK but without any new SACK 2568 * information in them. These could result from 2569 * any anomaly in the network like a switch 2570 * duplicating packets or a possible DoS attack. 2571 */ 2572 if (th->th_ack != tp->snd_una || 2573 (tcp_is_sack_recovery(tp, &to) && 2574 (sack_changed == SACK_NOCHANGE))) 2575 break; 2576 else if (!tcp_timer_active(tp, TT_REXMT)) 2577 tp->t_dupacks = 0; 2578 else if (++tp->t_dupacks > tcprexmtthresh || 2579 IN_FASTRECOVERY(tp->t_flags)) { 2580 cc_ack_received(tp, th, nsegs, 2581 CC_DUPACK); 2582 if (V_tcp_do_prr && 2583 IN_FASTRECOVERY(tp->t_flags) && 2584 (tp->t_flags & TF_SACK_PERMIT)) { 2585 tcp_do_prr_ack(tp, th, &to, sack_changed); 2586 } else if (tcp_is_sack_recovery(tp, &to) && 2587 IN_FASTRECOVERY(tp->t_flags)) { 2588 int awnd; 2589 2590 /* 2591 * Compute the amount of data in flight first. 
2592 * We can inject new data into the pipe iff 2593 * we have less than 1/2 the original window's 2594 * worth of data in flight. 2595 */ 2596 if (V_tcp_do_newsack) 2597 awnd = tcp_compute_pipe(tp); 2598 else 2599 awnd = (tp->snd_nxt - tp->snd_fack) + 2600 tp->sackhint.sack_bytes_rexmit; 2601 2602 if (awnd < tp->snd_ssthresh) { 2603 tp->snd_cwnd += maxseg; 2604 if (tp->snd_cwnd > tp->snd_ssthresh) 2605 tp->snd_cwnd = tp->snd_ssthresh; 2606 } 2607 } else 2608 tp->snd_cwnd += maxseg; 2609 (void) tcp_output(tp); 2610 goto drop; 2611 } else if (tp->t_dupacks == tcprexmtthresh || 2612 (tp->t_flags & TF_SACK_PERMIT && 2613 V_tcp_do_newsack && 2614 tp->sackhint.sacked_bytes > 2615 (tcprexmtthresh - 1) * maxseg)) { 2616 enter_recovery: 2617 /* 2618 * Above is the RFC6675 trigger condition of 2619 * more than (dupthresh-1)*maxseg sacked data. 2620 * If the count of holes in the 2621 * scoreboard is >= dupthresh, we could 2622 * also enter loss recovery, but don't 2623 * have that value readily available. 2624 */ 2625 tp->t_dupacks = tcprexmtthresh; 2626 tcp_seq onxt = tp->snd_nxt; 2627 2628 /* 2629 * If we're doing sack, or prr, check 2630 * to see if we're already in sack 2631 * recovery. If we're not doing sack, 2632 * check to see if we're in newreno 2633 * recovery. 2634 */ 2635 if (V_tcp_do_prr || 2636 (tp->t_flags & TF_SACK_PERMIT)) { 2637 if (IN_FASTRECOVERY(tp->t_flags)) { 2638 tp->t_dupacks = 0; 2639 break; 2640 } 2641 } else { 2642 if (SEQ_LEQ(th->th_ack, 2643 tp->snd_recover)) { 2644 tp->t_dupacks = 0; 2645 break; 2646 } 2647 } 2648 /* Congestion signal before ack. */ 2649 cc_cong_signal(tp, th, CC_NDUPACK); 2650 cc_ack_received(tp, th, nsegs, 2651 CC_DUPACK); 2652 tcp_timer_activate(tp, TT_REXMT, 0); 2653 tp->t_rtttime = 0; 2654 if (V_tcp_do_prr) { 2655 /* 2656 * snd_ssthresh is already updated by 2657 * cc_cong_signal. 2658 */ 2659 if (tcp_is_sack_recovery(tp, &to)) { 2660 /* 2661 * Exclude Limited Transmit 2662 * segments here 2663 */ 2664 tp->sackhint.prr_delivered = 2665 maxseg; 2666 } else { 2667 tp->sackhint.prr_delivered = 2668 imin(tp->snd_max - tp->snd_una, 2669 imin(INT_MAX / 65536, 2670 tp->t_dupacks) * maxseg); 2671 } 2672 tp->sackhint.recover_fs = max(1, 2673 tp->snd_nxt - tp->snd_una); 2674 } 2675 if (tcp_is_sack_recovery(tp, &to)) { 2676 TCPSTAT_INC( 2677 tcps_sack_recovery_episode); 2678 tp->snd_recover = tp->snd_nxt; 2679 tp->snd_cwnd = maxseg; 2680 (void) tcp_output(tp); 2681 if (SEQ_GT(th->th_ack, tp->snd_una)) 2682 goto resume_partialack; 2683 goto drop; 2684 } 2685 tp->snd_nxt = th->th_ack; 2686 tp->snd_cwnd = maxseg; 2687 (void) tcp_output(tp); 2688 KASSERT(tp->snd_limited <= 2, 2689 ("%s: tp->snd_limited too big", 2690 __func__)); 2691 tp->snd_cwnd = tp->snd_ssthresh + 2692 maxseg * 2693 (tp->t_dupacks - tp->snd_limited); 2694 if (SEQ_GT(onxt, tp->snd_nxt)) 2695 tp->snd_nxt = onxt; 2696 goto drop; 2697 } else if (V_tcp_do_rfc3042) { 2698 /* 2699 * Process first and second duplicate 2700 * ACKs. Each indicates a segment 2701 * leaving the network, creating room 2702 * for more. Make sure we can send a 2703 * packet on reception of each duplicate 2704 * ACK by increasing snd_cwnd by one 2705 * segment. Restore the original 2706 * snd_cwnd after packet transmission. 
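 * Concretely, cwnd is set below to the amount of data already in
 * flight plus (t_dupacks - snd_limited) segments, so each of the
 * first two duplicate ACKs can release at most one previously unsent
 * segment, as described in RFC 3042.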
2707 */ 2708 cc_ack_received(tp, th, nsegs, 2709 CC_DUPACK); 2710 uint32_t oldcwnd = tp->snd_cwnd; 2711 tcp_seq oldsndmax = tp->snd_max; 2712 u_int sent; 2713 int avail; 2714 2715 KASSERT(tp->t_dupacks == 1 || 2716 tp->t_dupacks == 2, 2717 ("%s: dupacks not 1 or 2", 2718 __func__)); 2719 if (tp->t_dupacks == 1) 2720 tp->snd_limited = 0; 2721 tp->snd_cwnd = 2722 (tp->snd_nxt - tp->snd_una) + 2723 (tp->t_dupacks - tp->snd_limited) * 2724 maxseg; 2725 /* 2726 * Only call tcp_output when there 2727 * is new data available to be sent 2728 * or we need to send an ACK. 2729 */ 2730 SOCKBUF_LOCK(&so->so_snd); 2731 avail = sbavail(&so->so_snd) - 2732 (tp->snd_nxt - tp->snd_una); 2733 SOCKBUF_UNLOCK(&so->so_snd); 2734 if (avail > 0 || tp->t_flags & TF_ACKNOW) 2735 (void) tcp_output(tp); 2736 sent = tp->snd_max - oldsndmax; 2737 if (sent > maxseg) { 2738 KASSERT((tp->t_dupacks == 2 && 2739 tp->snd_limited == 0) || 2740 (sent == maxseg + 1 && 2741 tp->t_flags & TF_SENTFIN), 2742 ("%s: sent too much", 2743 __func__)); 2744 tp->snd_limited = 2; 2745 } else if (sent > 0) 2746 ++tp->snd_limited; 2747 tp->snd_cwnd = oldcwnd; 2748 goto drop; 2749 } 2750 } 2751 break; 2752 } else { 2753 /* 2754 * This ack is advancing the left edge, reset the 2755 * counter. 2756 */ 2757 tp->t_dupacks = 0; 2758 /* 2759 * If this ack also has new SACK info, increment the 2760 * counter as per rfc6675. The variable 2761 * sack_changed tracks all changes to the SACK 2762 * scoreboard, including when partial ACKs without 2763 * SACK options are received, and clear the scoreboard 2764 * from the left side. Such partial ACKs should not be 2765 * counted as dupacks here. 2766 */ 2767 if (tcp_is_sack_recovery(tp, &to) && 2768 (sack_changed != SACK_NOCHANGE)) { 2769 tp->t_dupacks++; 2770 /* limit overhead by setting maxseg last */ 2771 if (!IN_FASTRECOVERY(tp->t_flags) && 2772 (tp->sackhint.sacked_bytes > 2773 ((tcprexmtthresh - 1) * 2774 (maxseg = tcp_maxseg(tp))))) { 2775 goto enter_recovery; 2776 } 2777 } 2778 } 2779 2780 resume_partialack: 2781 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2782 ("%s: th_ack <= snd_una", __func__)); 2783 2784 /* 2785 * If the congestion window was inflated to account 2786 * for the other side's cached packets, retract it. 2787 */ 2788 if (IN_FASTRECOVERY(tp->t_flags)) { 2789 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2790 if (tp->t_flags & TF_SACK_PERMIT) 2791 if (V_tcp_do_prr && to.to_flags & TOF_SACK) { 2792 tcp_timer_activate(tp, TT_REXMT, 0); 2793 tp->t_rtttime = 0; 2794 tcp_do_prr_ack(tp, th, &to, sack_changed); 2795 tp->t_flags |= TF_ACKNOW; 2796 (void) tcp_output(tp); 2797 } else 2798 tcp_sack_partialack(tp, th); 2799 else 2800 tcp_newreno_partial_ack(tp, th); 2801 } else 2802 cc_post_recovery(tp, th); 2803 } else if (IN_CONGRECOVERY(tp->t_flags)) { 2804 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2805 if (V_tcp_do_prr) { 2806 tp->sackhint.delivered_data = BYTES_THIS_ACK(tp, th); 2807 tp->snd_fack = th->th_ack; 2808 /* 2809 * During ECN cwnd reduction 2810 * always use PRR-SSRB 2811 */ 2812 tcp_do_prr_ack(tp, th, &to, SACK_CHANGE); 2813 (void) tcp_output(tp); 2814 } 2815 } else 2816 cc_post_recovery(tp, th); 2817 } 2818 /* 2819 * If we reach this point, ACK is not a duplicate, 2820 * i.e., it ACKs something we sent. 2821 */ 2822 if (tp->t_flags & TF_NEEDSYN) { 2823 /* 2824 * T/TCP: Connection was half-synchronized, and our 2825 * SYN has been ACK'd (so connection is now fully 2826 * synchronized). 
Go to non-starred state,
2827 * increment snd_una for ACK of SYN, and check if
2828 * we can do window scaling.
2829 */
2830 tp->t_flags &= ~TF_NEEDSYN;
2831 tp->snd_una++;
2832 /* Do window scaling? */
2833 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2834 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2835 tp->rcv_scale = tp->request_r_scale;
2836 /* Send window already scaled. */
2837 }
2838 }
2839
2840 process_ACK:
2841 INP_WLOCK_ASSERT(inp);
2842
2843 /*
2844 * Adjust for the SYN bit in sequence space,
2845 * but don't account for it in cwnd calculations.
2846 * This is for the SYN_RECEIVED, non-simultaneous
2847 * SYN case. SYN_SENT and simultaneous SYN are
2848 * treated elsewhere.
2849 */
2850 if (incforsyn)
2851 tp->snd_una++;
2852 acked = BYTES_THIS_ACK(tp, th);
2853 KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2854 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2855 tp->snd_una, th->th_ack, tp, m));
2856 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
2857 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2858
2859 /*
2860 * If we just performed our first retransmit, and the ACK
2861 * arrives within our recovery window, then it was a mistake
2862 * to do the retransmit in the first place. Recover our
2863 * original cwnd and ssthresh, and proceed to transmit where
2864 * we left off.
2865 */
2866 if (tp->t_rxtshift == 1 &&
2867 tp->t_flags & TF_PREVVALID &&
2868 tp->t_badrxtwin != 0 &&
2869 to.to_flags & TOF_TS &&
2870 to.to_tsecr != 0 &&
2871 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
2872 cc_cong_signal(tp, th, CC_RTO_ERR);
2873
2874 /*
2875 * If we have a timestamp reply, update smoothed
2876 * round trip time. If no timestamp is present but
2877 * transmit timer is running and timed sequence
2878 * number was acked, update smoothed round trip time.
2879 * Since we now have an rtt measurement, cancel the
2880 * timer backoff (cf., Phil Karn's retransmit alg.).
2881 * Recompute the initial retransmit timer.
2882 *
2883 * Some boxes send broken timestamp replies
2884 * during the SYN+ACK phase, ignore
2885 * timestamps of 0 or we could calculate a
2886 * huge RTT and blow up the retransmit timer.
2887 */
2888 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2889 uint32_t t;
2890
2891 t = tcp_ts_getticks() - to.to_tsecr;
2892 if (!tp->t_rttlow || tp->t_rttlow > t)
2893 tp->t_rttlow = t;
2894 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2895 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2896 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2897 tp->t_rttlow = ticks - tp->t_rtttime;
2898 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2899 }
2900
2901 SOCKBUF_LOCK(&so->so_snd);
2902 /*
2903 * Clear t_acktime if remote side has ACKd all data in the
2904 * socket buffer and FIN (if applicable).
2905 * Otherwise, update t_acktime if we received a sufficiently
2906 * large ACK.
2907 */
2908 if ((tp->t_state <= TCPS_CLOSE_WAIT &&
2909 acked == sbavail(&so->so_snd)) ||
2910 acked > sbavail(&so->so_snd))
2911 tp->t_acktime = 0;
2912 else if (acked > 1)
2913 tp->t_acktime = ticks;
2914
2915 /*
2916 * If all outstanding data is acked, stop retransmit
2917 * timer and remember to restart (more output or persist).
2918 * If there is more data to be acked, restart retransmit
2919 * timer, using current (possibly backed-off) value.
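 * Here "remember to restart" means setting needoutput so that
 * tcp_output() runs before this function returns and can arm the
 * retransmit or persist timer as the remaining data and window
 * dictate.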
2920 */ 2921 if (th->th_ack == tp->snd_max) { 2922 tcp_timer_activate(tp, TT_REXMT, 0); 2923 needoutput = 1; 2924 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2925 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp)); 2926 2927 /* 2928 * If no data (only SYN) was ACK'd, 2929 * skip rest of ACK processing. 2930 */ 2931 if (acked == 0) { 2932 SOCKBUF_UNLOCK(&so->so_snd); 2933 goto step6; 2934 } 2935 2936 /* 2937 * Let the congestion control algorithm update congestion 2938 * control related information. This typically means increasing 2939 * the congestion window. 2940 */ 2941 cc_ack_received(tp, th, nsegs, CC_ACK); 2942 2943 if (acked > sbavail(&so->so_snd)) { 2944 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2945 tp->snd_wnd -= sbavail(&so->so_snd); 2946 else 2947 tp->snd_wnd = 0; 2948 mfree = sbcut_locked(&so->so_snd, 2949 (int)sbavail(&so->so_snd)); 2950 ourfinisacked = 1; 2951 } else { 2952 mfree = sbcut_locked(&so->so_snd, acked); 2953 if (tp->snd_wnd >= (uint32_t) acked) 2954 tp->snd_wnd -= acked; 2955 else 2956 tp->snd_wnd = 0; 2957 ourfinisacked = 0; 2958 } 2959 /* NB: sowwakeup_locked() does an implicit unlock. */ 2960 sowwakeup_locked(so); 2961 m_freem(mfree); 2962 /* Detect una wraparound. */ 2963 if (!IN_RECOVERY(tp->t_flags) && 2964 SEQ_GT(tp->snd_una, tp->snd_recover) && 2965 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2966 tp->snd_recover = th->th_ack - 1; 2967 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2968 if (IN_RECOVERY(tp->t_flags) && 2969 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2970 EXIT_RECOVERY(tp->t_flags); 2971 } 2972 tp->snd_una = th->th_ack; 2973 if (tp->t_flags & TF_SACK_PERMIT) { 2974 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2975 tp->snd_recover = tp->snd_una; 2976 } 2977 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2978 tp->snd_nxt = tp->snd_una; 2979 2980 switch (tp->t_state) { 2981 /* 2982 * In FIN_WAIT_1 STATE in addition to the processing 2983 * for the ESTABLISHED state if our FIN is now acknowledged 2984 * then enter FIN_WAIT_2. 2985 */ 2986 case TCPS_FIN_WAIT_1: 2987 if (ourfinisacked) { 2988 /* 2989 * If we can't receive any more 2990 * data, then closing user can proceed. 2991 * Starting the timer is contrary to the 2992 * specification, but if we don't get a FIN 2993 * we'll hang forever. 2994 */ 2995 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2996 soisdisconnected(so); 2997 tcp_timer_activate(tp, TT_2MSL, 2998 (tcp_fast_finwait2_recycle ? 2999 tcp_finwait2_timeout : 3000 TP_MAXIDLE(tp))); 3001 } 3002 tcp_state_change(tp, TCPS_FIN_WAIT_2); 3003 } 3004 break; 3005 3006 /* 3007 * In CLOSING STATE in addition to the processing for 3008 * the ESTABLISHED state if the ACK acknowledges our FIN 3009 * then enter the TIME-WAIT state, otherwise ignore 3010 * the segment. 3011 */ 3012 case TCPS_CLOSING: 3013 if (ourfinisacked) { 3014 tcp_twstart(tp); 3015 m_freem(m); 3016 return; 3017 } 3018 break; 3019 3020 /* 3021 * In LAST_ACK, we may still be waiting for data to drain 3022 * and/or to be acked, as well as for the ack of our FIN. 3023 * If our FIN is now acknowledged, delete the TCB, 3024 * enter the closed state and return. 3025 */ 3026 case TCPS_LAST_ACK: 3027 if (ourfinisacked) { 3028 tp = tcp_close(tp); 3029 goto drop; 3030 } 3031 break; 3032 } 3033 } 3034 3035 step6: 3036 INP_WLOCK_ASSERT(inp); 3037 3038 /* 3039 * Update window information. 3040 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
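 * snd_wl1/snd_wl2 record the SEG.SEQ/SEG.ACK of the segment last used
 * to update the window; the test below accepts a new window only from
 * a segment with a newer SEQ, or an equal SEQ and a newer (or
 * window-enlarging) ACK. This is the RFC 793 SND.WL1/SND.WL2 rule and
 * keeps old, reordered segments from rewriting snd_wnd.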
3041 */ 3042 if ((thflags & TH_ACK) && 3043 (SEQ_LT(tp->snd_wl1, th->th_seq) || 3044 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 3045 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 3046 /* keep track of pure window updates */ 3047 if (tlen == 0 && 3048 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 3049 TCPSTAT_INC(tcps_rcvwinupd); 3050 tp->snd_wnd = tiwin; 3051 tp->snd_wl1 = th->th_seq; 3052 tp->snd_wl2 = th->th_ack; 3053 if (tp->snd_wnd > tp->max_sndwnd) 3054 tp->max_sndwnd = tp->snd_wnd; 3055 needoutput = 1; 3056 } 3057 3058 /* 3059 * Process segments with URG. 3060 */ 3061 if ((thflags & TH_URG) && th->th_urp && 3062 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3063 /* 3064 * This is a kludge, but if we receive and accept 3065 * random urgent pointers, we'll crash in 3066 * soreceive. It's hard to imagine someone 3067 * actually wanting to send this much urgent data. 3068 */ 3069 SOCKBUF_LOCK(&so->so_rcv); 3070 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 3071 th->th_urp = 0; /* XXX */ 3072 thflags &= ~TH_URG; /* XXX */ 3073 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 3074 goto dodata; /* XXX */ 3075 } 3076 /* 3077 * If this segment advances the known urgent pointer, 3078 * then mark the data stream. This should not happen 3079 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 3080 * a FIN has been received from the remote side. 3081 * In these states we ignore the URG. 3082 * 3083 * According to RFC961 (Assigned Protocols), 3084 * the urgent pointer points to the last octet 3085 * of urgent data. We continue, however, 3086 * to consider it to indicate the first octet 3087 * of data past the urgent section as the original 3088 * spec states (in one of two places). 3089 */ 3090 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 3091 tp->rcv_up = th->th_seq + th->th_urp; 3092 so->so_oobmark = sbavail(&so->so_rcv) + 3093 (tp->rcv_up - tp->rcv_nxt) - 1; 3094 if (so->so_oobmark == 0) 3095 so->so_rcv.sb_state |= SBS_RCVATMARK; 3096 sohasoutofband(so); 3097 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 3098 } 3099 SOCKBUF_UNLOCK(&so->so_rcv); 3100 /* 3101 * Remove out of band data so doesn't get presented to user. 3102 * This can happen independent of advancing the URG pointer, 3103 * but if two URG's are pending at once, some out-of-band 3104 * data may creep in... ick. 3105 */ 3106 if (th->th_urp <= (uint32_t)tlen && 3107 !(so->so_options & SO_OOBINLINE)) { 3108 /* hdr drop is delayed */ 3109 tcp_pulloutofband(so, th, m, drop_hdrlen); 3110 } 3111 } else { 3112 /* 3113 * If no out of band data is expected, 3114 * pull receive urgent pointer along 3115 * with the receive window. 3116 */ 3117 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 3118 tp->rcv_up = tp->rcv_nxt; 3119 } 3120 dodata: /* XXX */ 3121 INP_WLOCK_ASSERT(inp); 3122 3123 /* 3124 * Process the segment text, merging it into the TCP sequencing queue, 3125 * and arranging for acknowledgment of receipt if necessary. 3126 * This process logically involves adjusting tp->rcv_wnd as data 3127 * is presented to the user (this happens in tcp_usrreq.c, 3128 * case PRU_RCVD). If a FIN has already been received on this 3129 * connection then we just ignore the text. 
3130 */
3131 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3132 IS_FASTOPEN(tp->t_flags));
3133 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
3134 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3135 tcp_seq save_start = th->th_seq;
3136 tcp_seq save_rnxt = tp->rcv_nxt;
3137 int save_tlen = tlen;
3138 m_adj(m, drop_hdrlen); /* delayed header drop */
3139 /*
3140 * Insert segment which includes th into TCP reassembly queue
3141 * with control block tp. Set thflags to whether reassembly now
3142 * includes a segment with FIN. This handles the common case
3143 * inline (segment is the next to be received on an established
3144 * connection, and the queue is empty), avoiding linkage into
3145 * and removal from the queue and repetition of various
3146 * conversions.
3147 * Set DELACK for segments received in order, but ack
3148 * immediately when segments are out of order (so
3149 * fast retransmit can work).
3150 */
3151 if (th->th_seq == tp->rcv_nxt &&
3152 SEGQ_EMPTY(tp) &&
3153 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3154 tfo_syn)) {
3155 if (DELAY_ACK(tp, tlen) || tfo_syn)
3156 tp->t_flags |= TF_DELACK;
3157 else
3158 tp->t_flags |= TF_ACKNOW;
3159 tp->rcv_nxt += tlen;
3160 if (tlen &&
3161 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
3162 (tp->t_fbyte_in == 0)) {
3163 tp->t_fbyte_in = ticks;
3164 if (tp->t_fbyte_in == 0)
3165 tp->t_fbyte_in = 1;
3166 if (tp->t_fbyte_out && tp->t_fbyte_in)
3167 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
3168 }
3169 thflags = tcp_get_flags(th) & TH_FIN;
3170 TCPSTAT_INC(tcps_rcvpack);
3171 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3172 SOCKBUF_LOCK(&so->so_rcv);
3173 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3174 m_freem(m);
3175 else
3176 sbappendstream_locked(&so->so_rcv, m, 0);
3177 tp->t_flags |= TF_WAKESOR;
3178 } else {
3179 /*
3180 * XXX: Due to the header drop above, "th" is
3181 * theoretically invalid by now. Fortunately
3182 * m_adj() doesn't actually free any mbufs when
3183 * trimming from the head.
3184 */
3185 tcp_seq temp = save_start;
3186
3187 thflags = tcp_reass(tp, th, &temp, &tlen, m);
3188 tp->t_flags |= TF_ACKNOW;
3189 }
3190 if ((tp->t_flags & TF_SACK_PERMIT) &&
3191 (save_tlen > 0) &&
3192 TCPS_HAVEESTABLISHED(tp->t_state)) {
3193 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
3194 /*
3195 * DSACK actually handled in the fastpath
3196 * above.
3197 */
3198 tcp_update_sack_list(tp, save_start,
3199 save_start + save_tlen);
3200 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
3201 if ((tp->rcv_numsacks >= 1) &&
3202 (tp->sackblks[0].end == save_start)) {
3203 /*
3204 * Partial overlap, recorded at todrop
3205 * above.
3206 */
3207 tcp_update_sack_list(tp,
3208 tp->sackblks[0].start,
3209 tp->sackblks[0].end);
3210 } else {
3211 tcp_update_dsack_list(tp, save_start,
3212 save_start + save_tlen);
3213 }
3214 } else if (tlen >= save_tlen) {
3215 /* Update of sackblks. */
3216 tcp_update_dsack_list(tp, save_start,
3217 save_start + save_tlen);
3218 } else if (tlen > 0) {
3219 tcp_update_dsack_list(tp, save_start,
3220 save_start + tlen);
3221 }
3222 }
3223 tcp_handle_wakeup(tp);
3224 #if 0
3225 /*
3226 * Note the amount of data that peer has sent into
3227 * our window, in order to estimate the sender's
3228 * buffer size.
3229 * XXX: Unused.
3230 */ 3231 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3232 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3233 else 3234 len = so->so_rcv.sb_hiwat; 3235 #endif 3236 } else { 3237 m_freem(m); 3238 thflags &= ~TH_FIN; 3239 } 3240 3241 /* 3242 * If FIN is received ACK the FIN and let the user know 3243 * that the connection is closing. 3244 */ 3245 if (thflags & TH_FIN) { 3246 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3247 /* The socket upcall is handled by socantrcvmore. */ 3248 socantrcvmore(so); 3249 /* 3250 * If connection is half-synchronized 3251 * (ie NEEDSYN flag on) then delay ACK, 3252 * so it may be piggybacked when SYN is sent. 3253 * Otherwise, since we received a FIN then no 3254 * more input can be expected, send ACK now. 3255 */ 3256 if (tp->t_flags & TF_NEEDSYN) 3257 tp->t_flags |= TF_DELACK; 3258 else 3259 tp->t_flags |= TF_ACKNOW; 3260 tp->rcv_nxt++; 3261 } 3262 switch (tp->t_state) { 3263 /* 3264 * In SYN_RECEIVED and ESTABLISHED STATES 3265 * enter the CLOSE_WAIT state. 3266 */ 3267 case TCPS_SYN_RECEIVED: 3268 tp->t_starttime = ticks; 3269 /* FALLTHROUGH */ 3270 case TCPS_ESTABLISHED: 3271 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3272 break; 3273 3274 /* 3275 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3276 * enter the CLOSING state. 3277 */ 3278 case TCPS_FIN_WAIT_1: 3279 tcp_state_change(tp, TCPS_CLOSING); 3280 break; 3281 3282 /* 3283 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3284 * starting the time-wait timer, turning off the other 3285 * standard timers. 3286 */ 3287 case TCPS_FIN_WAIT_2: 3288 tcp_twstart(tp); 3289 return; 3290 } 3291 } 3292 TCP_PROBE3(debug__input, tp, th, m); 3293 3294 /* 3295 * Return any desired output. 3296 */ 3297 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3298 (void) tcp_output(tp); 3299 3300 check_delack: 3301 INP_WLOCK_ASSERT(inp); 3302 3303 if (tp->t_flags & TF_DELACK) { 3304 tp->t_flags &= ~TF_DELACK; 3305 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3306 } 3307 INP_WUNLOCK(inp); 3308 return; 3309 3310 dropafterack: 3311 /* 3312 * Generate an ACK dropping incoming segment if it occupies 3313 * sequence space, where the ACK reflects our state. 3314 * 3315 * We can now skip the test for the RST flag since all 3316 * paths to this code happen after packets containing 3317 * RST have been dropped. 3318 * 3319 * In the SYN-RECEIVED state, don't send an ACK unless the 3320 * segment we received passes the SYN-RECEIVED ACK test. 3321 * If it fails send a RST. This breaks the loop in the 3322 * "LAND" DoS attack, and also prevents an ACK storm 3323 * between two listening ports that have been sent forged 3324 * SYN segments, each with the source address of the other. 3325 */ 3326 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3327 (SEQ_GT(tp->snd_una, th->th_ack) || 3328 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3329 rstreason = BANDLIM_RST_OPENPORT; 3330 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 3331 goto dropwithreset; 3332 } 3333 TCP_PROBE3(debug__input, tp, th, m); 3334 tp->t_flags |= TF_ACKNOW; 3335 (void) tcp_output(tp); 3336 INP_WUNLOCK(inp); 3337 m_freem(m); 3338 return; 3339 3340 dropwithreset: 3341 if (tp != NULL) { 3342 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3343 INP_WUNLOCK(inp); 3344 } else 3345 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3346 return; 3347 3348 drop: 3349 /* 3350 * Drop space held by incoming segment and return. 
3351 */ 3352 TCP_PROBE3(debug__input, tp, th, m); 3353 if (tp != NULL) { 3354 INP_WUNLOCK(inp); 3355 } 3356 m_freem(m); 3357 } 3358 3359 /* 3360 * Issue RST and make ACK acceptable to originator of segment. 3361 * The mbuf must still include the original packet header. 3362 * tp may be NULL. 3363 */ 3364 void 3365 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3366 int tlen, int rstreason) 3367 { 3368 #ifdef INET 3369 struct ip *ip; 3370 #endif 3371 #ifdef INET6 3372 struct ip6_hdr *ip6; 3373 #endif 3374 3375 if (tp != NULL) { 3376 INP_LOCK_ASSERT(tptoinpcb(tp)); 3377 } 3378 3379 /* Don't bother if destination was broadcast/multicast. */ 3380 if ((tcp_get_flags(th) & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3381 goto drop; 3382 #ifdef INET6 3383 if (mtod(m, struct ip *)->ip_v == 6) { 3384 ip6 = mtod(m, struct ip6_hdr *); 3385 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3386 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3387 goto drop; 3388 /* IPv6 anycast check is done at tcp6_input() */ 3389 } 3390 #endif 3391 #if defined(INET) && defined(INET6) 3392 else 3393 #endif 3394 #ifdef INET 3395 { 3396 ip = mtod(m, struct ip *); 3397 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3398 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3399 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3400 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3401 goto drop; 3402 } 3403 #endif 3404 3405 /* Perform bandwidth limiting. */ 3406 if (badport_bandlim(rstreason) < 0) 3407 goto drop; 3408 3409 /* tcp_respond consumes the mbuf chain. */ 3410 if (tcp_get_flags(th) & TH_ACK) { 3411 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3412 th->th_ack, TH_RST); 3413 } else { 3414 if (tcp_get_flags(th) & TH_SYN) 3415 tlen++; 3416 if (tcp_get_flags(th) & TH_FIN) 3417 tlen++; 3418 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3419 (tcp_seq)0, TH_RST|TH_ACK); 3420 } 3421 return; 3422 drop: 3423 m_freem(m); 3424 } 3425 3426 /* 3427 * Parse TCP options and place in tcpopt. 3428 */ 3429 void 3430 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3431 { 3432 int opt, optlen; 3433 3434 to->to_flags = 0; 3435 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3436 opt = cp[0]; 3437 if (opt == TCPOPT_EOL) 3438 break; 3439 if (opt == TCPOPT_NOP) 3440 optlen = 1; 3441 else { 3442 if (cnt < 2) 3443 break; 3444 optlen = cp[1]; 3445 if (optlen < 2 || optlen > cnt) 3446 break; 3447 } 3448 switch (opt) { 3449 case TCPOPT_MAXSEG: 3450 if (optlen != TCPOLEN_MAXSEG) 3451 continue; 3452 if (!(flags & TO_SYN)) 3453 continue; 3454 to->to_flags |= TOF_MSS; 3455 bcopy((char *)cp + 2, 3456 (char *)&to->to_mss, sizeof(to->to_mss)); 3457 to->to_mss = ntohs(to->to_mss); 3458 break; 3459 case TCPOPT_WINDOW: 3460 if (optlen != TCPOLEN_WINDOW) 3461 continue; 3462 if (!(flags & TO_SYN)) 3463 continue; 3464 to->to_flags |= TOF_SCALE; 3465 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3466 break; 3467 case TCPOPT_TIMESTAMP: 3468 if (optlen != TCPOLEN_TIMESTAMP) 3469 continue; 3470 to->to_flags |= TOF_TS; 3471 bcopy((char *)cp + 2, 3472 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3473 to->to_tsval = ntohl(to->to_tsval); 3474 bcopy((char *)cp + 6, 3475 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3476 to->to_tsecr = ntohl(to->to_tsecr); 3477 break; 3478 case TCPOPT_SIGNATURE: 3479 /* 3480 * In order to reply to a host which has set the 3481 * TCP_SIGNATURE option in its initial SYN, we have 3482 * to record the fact that the option was observed 3483 * here for the syncache code to perform the correct 3484 * response. 
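 * Only the option's presence and a pointer to its digest are recorded
 * here; the actual RFC 2385 MD5 verification is performed elsewhere,
 * by the TCP_SIGNATURE support code, when that code is compiled in.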
3485 */ 3486 if (optlen != TCPOLEN_SIGNATURE) 3487 continue; 3488 to->to_flags |= TOF_SIGNATURE; 3489 to->to_signature = cp + 2; 3490 break; 3491 case TCPOPT_SACK_PERMITTED: 3492 if (optlen != TCPOLEN_SACK_PERMITTED) 3493 continue; 3494 if (!(flags & TO_SYN)) 3495 continue; 3496 if (!V_tcp_do_sack) 3497 continue; 3498 to->to_flags |= TOF_SACKPERM; 3499 break; 3500 case TCPOPT_SACK: 3501 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3502 continue; 3503 if (flags & TO_SYN) 3504 continue; 3505 to->to_flags |= TOF_SACK; 3506 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3507 to->to_sacks = cp + 2; 3508 TCPSTAT_INC(tcps_sack_rcv_blocks); 3509 break; 3510 case TCPOPT_FAST_OPEN: 3511 /* 3512 * Cookie length validation is performed by the 3513 * server side cookie checking code or the client 3514 * side cookie cache update code. 3515 */ 3516 if (!(flags & TO_SYN)) 3517 continue; 3518 if (!V_tcp_fastopen_client_enable && 3519 !V_tcp_fastopen_server_enable) 3520 continue; 3521 to->to_flags |= TOF_FASTOPEN; 3522 to->to_tfo_len = optlen - 2; 3523 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3524 break; 3525 default: 3526 continue; 3527 } 3528 } 3529 } 3530 3531 /* 3532 * Pull out of band byte out of a segment so 3533 * it doesn't appear in the user's data queue. 3534 * It is still reflected in the segment length for 3535 * sequencing purposes. 3536 */ 3537 void 3538 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3539 int off) 3540 { 3541 int cnt = off + th->th_urp - 1; 3542 3543 while (cnt >= 0) { 3544 if (m->m_len > cnt) { 3545 char *cp = mtod(m, caddr_t) + cnt; 3546 struct tcpcb *tp = sototcpcb(so); 3547 3548 INP_WLOCK_ASSERT(tptoinpcb(tp)); 3549 3550 tp->t_iobc = *cp; 3551 tp->t_oobflags |= TCPOOB_HAVEDATA; 3552 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3553 m->m_len--; 3554 if (m->m_flags & M_PKTHDR) 3555 m->m_pkthdr.len--; 3556 return; 3557 } 3558 cnt -= m->m_len; 3559 m = m->m_next; 3560 if (m == NULL) 3561 break; 3562 } 3563 panic("tcp_pulloutofband"); 3564 } 3565 3566 /* 3567 * Collect new round-trip time estimate 3568 * and update averages and current timeout. 3569 */ 3570 void 3571 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3572 { 3573 int delta; 3574 3575 INP_WLOCK_ASSERT(tptoinpcb(tp)); 3576 3577 TCPSTAT_INC(tcps_rttupdated); 3578 if (tp->t_rttupdated < UCHAR_MAX) 3579 tp->t_rttupdated++; 3580 #ifdef STATS 3581 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, 3582 imax(0, rtt * 1000 / hz)); 3583 #endif 3584 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3585 /* 3586 * srtt is stored as fixed point with 5 bits after the 3587 * binary point (i.e., scaled by 8). The following magic 3588 * is equivalent to the smoothing algorithm in rfc793 with 3589 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3590 * point). Adjust rtt to origin 0. 3591 */ 3592 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3593 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3594 3595 if ((tp->t_srtt += delta) <= 0) 3596 tp->t_srtt = 1; 3597 3598 /* 3599 * We accumulate a smoothed rtt variance (actually, a 3600 * smoothed mean difference), then set the retransmit 3601 * timer to smoothed rtt + 4 times the smoothed variance. 3602 * rttvar is stored as fixed point with 4 bits after the 3603 * binary point (scaled by 16). The following is 3604 * equivalent to rfc793 smoothing with an alpha of .75 3605 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3606 * rfc793's wired-in beta. 
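 * Expressed in unscaled ticks, the smoothing in this function works
 * out to
 *	srtt   <- 7/8 * srtt   + 1/8 * rtt
 *	rttvar <- 3/4 * rttvar + 1/4 * |rtt - srtt|
 * and the retransmit timeout derived from them further below is
 * srtt + 4 * rttvar, before the range clamp is applied.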
3607 */ 3608 if (delta < 0) 3609 delta = -delta; 3610 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3611 if ((tp->t_rttvar += delta) <= 0) 3612 tp->t_rttvar = 1; 3613 } else { 3614 /* 3615 * No rtt measurement yet - use the unsmoothed rtt. 3616 * Set the variance to half the rtt (so our first 3617 * retransmit happens at 3*rtt). 3618 */ 3619 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3620 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3621 } 3622 tp->t_rtttime = 0; 3623 tp->t_rxtshift = 0; 3624 3625 /* 3626 * the retransmit should happen at rtt + 4 * rttvar. 3627 * Because of the way we do the smoothing, srtt and rttvar 3628 * will each average +1/2 tick of bias. When we compute 3629 * the retransmit timer, we want 1/2 tick of rounding and 3630 * 1 extra tick because of +-1/2 tick uncertainty in the 3631 * firing of the timer. The bias will give us exactly the 3632 * 1.5 tick we need. But, because the bias is 3633 * statistical, we have to test that we don't drop below 3634 * the minimum feasible timer (which is 2 ticks). 3635 */ 3636 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3637 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3638 3639 /* 3640 * We received an ack for a packet that wasn't retransmitted; 3641 * it is probably safe to discard any error indications we've 3642 * received recently. This isn't quite right, but close enough 3643 * for now (a route might have failed after we sent a segment, 3644 * and the return path might not be symmetrical). 3645 */ 3646 tp->t_softerror = 0; 3647 } 3648 3649 /* 3650 * Determine a reasonable value for maxseg size. 3651 * If the route is known, check route for mtu. 3652 * If none, use an mss that can be handled on the outgoing interface 3653 * without forcing IP to fragment. If no route is found, route has no mtu, 3654 * or the destination isn't local, use a default, hopefully conservative 3655 * size (usually 512 or the default IP max size, but no more than the mtu 3656 * of the interface), as we can't discover anything about intervening 3657 * gateways or networks. We also initialize the congestion/slow start 3658 * window to be a single segment if the destination isn't local. 3659 * While looking at the routing entry, we also initialize other path-dependent 3660 * parameters from pre-set or cached values in the routing entry. 3661 * 3662 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3663 * IP options, e.g. IPSEC data, since length of this data may vary, and 3664 * thus it is calculated for every segment separately in tcp_output(). 3665 * 3666 * NOTE that this routine is only called when we process an incoming 3667 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3668 * settings are handled in tcp_mssopt(). 3669 */ 3670 void 3671 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3672 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3673 { 3674 int mss = 0; 3675 uint32_t maxmtu = 0; 3676 struct inpcb *inp = tptoinpcb(tp); 3677 struct hc_metrics_lite metrics; 3678 #ifdef INET6 3679 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3680 size_t min_protoh = isipv6 ? 

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat differs from the default (then it
	 * has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(so, SO_SND, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(so, SO_RCV, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}
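
/*
 * Example of the socket buffer scaling in tcp_mss() above, with
 * hypothetical sizes: given an mss of 1460 and a 32768 byte send buffer,
 * roundup(32768, 1460) grows the buffer to 33580 bytes (23 segments),
 * subject to the sb_max limit; a buffer smaller than one mss would
 * instead shrink the mss to the buffer size.
 */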

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}
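
/*
 * E.g., with a 1500 byte interface MTU and a hostcache entry recording a
 * discovered path MTU of 1400, tcp_mssopt() advertises
 * min(1500, 1400) - 40 = 1360 for IPv4; if only one of the two values is
 * known, the max() branch uses that one alone.
 */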

void
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to,
    sackstatus_t sack_changed)
{
	int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
	int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	/*
	 * Compute the amount of data that this ACK is indicating
	 * (del_data) and an estimate of how many bytes are in the
	 * network.
	 */
	if (tcp_is_sack_recovery(tp, to) ||
	    (IN_CONGRECOVERY(tp->t_flags) &&
	    !IN_FASTRECOVERY(tp->t_flags))) {
		del_data = tp->sackhint.delivered_data;
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(tp);
		else
			pipe = (tp->snd_nxt - tp->snd_fack) +
			    tp->sackhint.sack_bytes_rexmit;
	} else {
		if (tp->sackhint.prr_delivered < (tcprexmtthresh * maxseg +
		    tp->snd_recover - tp->snd_una))
			del_data = maxseg;
		pipe = imax(0, tp->snd_max - tp->snd_una -
		    imin(INT_MAX / 65536, tp->t_dupacks) * maxseg);
	}
	tp->sackhint.prr_delivered += del_data;
	/*
	 * Proportional Rate Reduction
	 */
	if (pipe >= tp->snd_ssthresh) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs =
			    imax(1, tp->snd_nxt - tp->snd_una);
		snd_cnt = howmany((long)tp->sackhint.prr_delivered *
		    tp->snd_ssthresh, tp->sackhint.recover_fs) -
		    tp->sackhint.prr_out + maxseg - 1;
	} else {
		/*
		 * PRR 6937bis heuristic:
		 * - A partial ack without a SACK block beneath snd_recover
		 *   indicates further loss.
		 * - A SACK scoreboard update adding a new hole indicates
		 *   further loss, so be conservative and send at most one
		 *   segment.
		 * - Prevent ACK splitting attacks by being conservative
		 *   when no new data is acked.
		 */
		if ((sack_changed == SACK_NEWLOSS) || (del_data == 0))
			limit = tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out;
		else
			limit = imax(tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out, del_data) +
			    maxseg;
		snd_cnt = imin((tp->snd_ssthresh - pipe), limit);
	}
	snd_cnt = imax(snd_cnt, 0) / maxseg;
	/*
	 * Send snd_cnt new data into the network in response to this ack.
	 * If there is going to be a SACK retransmission, adjust snd_cwnd
	 * accordingly.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tcp_is_sack_recovery(tp, to)) {
			tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
			    tp->sackhint.sack_bytes_rexmit +
			    (snd_cnt * maxseg);
		} else {
			tp->snd_cwnd = (tp->snd_max - tp->snd_una) +
			    (snd_cnt * maxseg);
		}
	} else if (IN_CONGRECOVERY(tp->t_flags))
		tp->snd_cwnd = pipe - del_data + (snd_cnt * maxseg);
	tp->snd_cwnd = imax(maxseg, tp->snd_cwnd);
}
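
/*
 * PRR example with hypothetical round numbers: maxseg = 1000, a flight of
 * 10000 bytes at the start of recovery (recover_fs) and snd_ssthresh =
 * 5000.  While pipe remains at or above ssthresh, the first SACKed ack
 * delivering 1000 bytes gives
 * snd_cnt = howmany(1000 * 5000, 10000) - 0 + 999 = 1499 bytes -> 1
 * segment, and subsequent acks alternate between 0 and 1 new segments,
 * so roughly one segment is sent for every two delivered, ramping the
 * flight down toward ssthresh.
 */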

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer to
 * be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	if (tp->t_fb->tfb_compute_pipe == NULL) {
		return (tp->snd_max - tp->snd_una +
		    tp->sackhint.sack_bytes_rexmit -
		    tp->sackhint.sacked_bytes -
		    tp->sackhint.lost_bytes);
	} else {
		return ((*tp->t_fb->tfb_compute_pipe)(tp));
	}
}

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, which is also used as the Restart
	 * Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size overrides
	 * all of these.
	 */
	if (V_tcp_initcwnd_segments)
		return (min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460)));
	else if (V_tcp_do_rfc3390)
		return (min(4 * maxseg, max(2 * maxseg, 4380)));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
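
/*
 * For example, with maxseg = 1460: the sysctl-driven path with
 * V_tcp_initcwnd_segments = 10 yields min(14600, max(2920, 14600)) =
 * 14600 bytes (ten segments, as in RFC6928), the RFC3390 path yields
 * min(5840, max(2920, 4380)) = 4380 bytes, and the plain RFC5681 table
 * also yields 3 * 1460 = 4380 bytes since 1095 < 1460 <= 2190.
 */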