1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 2007-2008,2010
7 * Swinburne University of Technology, Melbourne, Australia.
8 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
9 * Copyright (c) 2010 The FreeBSD Foundation
10 * Copyright (c) 2010-2011 Juniper Networks, Inc.
11 * All rights reserved.
12 *
13 * Portions of this software were developed at the Centre for Advanced Internet
14 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
15 * James Healy and David Hayes, made possible in part by a grant from the Cisco
16 * University Research Program Fund at Community Foundation Silicon Valley.
17 *
18 * Portions of this software were developed at the Centre for Advanced
19 * Internet Architectures, Swinburne University of Technology, Melbourne,
20 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
21 *
22 * Portions of this software were developed by Robert N. M. Watson under
23 * contract to Juniper Networks, Inc.
24 *
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions
27 * are met:
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its contributors
34 * may be used to endorse or promote products derived from this software
35 * without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47 * SUCH DAMAGE.
48 */
49
50 #include <sys/cdefs.h>
51 #include "opt_inet.h"
52 #include "opt_inet6.h"
53 #include "opt_ipsec.h"
54 #include "opt_rss.h"
55
56 #include <sys/param.h>
57 #include <sys/arb.h>
58 #include <sys/kernel.h>
59 #ifdef TCP_HHOOK
60 #include <sys/hhook.h>
61 #endif
62 #include <sys/malloc.h>
63 #include <sys/mbuf.h>
64 #include <sys/proc.h> /* for proc0 declaration */
65 #include <sys/protosw.h>
66 #include <sys/qmath.h>
67 #include <sys/sdt.h>
68 #include <sys/signalvar.h>
69 #include <sys/socket.h>
70 #include <sys/socketvar.h>
71 #include <sys/sysctl.h>
72 #include <sys/syslog.h>
73 #include <sys/systm.h>
74 #include <sys/stats.h>
75
76 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
77
78 #include <vm/uma.h>
79
80 #include <net/if.h>
81 #include <net/if_var.h>
82 #include <net/route.h>
83 #include <net/rss_config.h>
84 #include <net/vnet.h>
85
86 #define TCPSTATES /* for logging */
87
88 #include <netinet/in.h>
89 #include <netinet/in_kdtrace.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/in_rss.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
95 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
96 #include <netinet/ip_var.h>
97 #include <netinet/ip_options.h>
98 #include <netinet/ip6.h>
99 #include <netinet/icmp6.h>
100 #include <netinet6/in6_pcb.h>
101 #include <netinet6/in6_rss.h>
102 #include <netinet6/in6_var.h>
103 #include <netinet6/ip6_var.h>
104 #include <netinet6/nd6.h>
105 #include <netinet/tcp.h>
106 #include <netinet/tcp_fsm.h>
107 #include <netinet/tcp_seq.h>
108 #include <netinet/tcp_timer.h>
109 #include <netinet/tcp_var.h>
110 #include <netinet/tcp_log_buf.h>
111 #include <netinet6/tcp6_var.h>
112 #include <netinet/tcpip.h>
113 #include <netinet/cc/cc.h>
114 #include <netinet/tcp_fastopen.h>
115 #ifdef TCPPCAP
116 #include <netinet/tcp_pcap.h>
117 #endif
118 #include <netinet/tcp_syncache.h>
119 #ifdef TCP_OFFLOAD
120 #include <netinet/tcp_offload.h>
121 #endif
122 #include <netinet/tcp_ecn.h>
123 #include <netinet/udp.h>
124
125 #include <netipsec/ipsec_support.h>
126
127 #include <machine/in_cksum.h>
128
129 #include <security/mac/mac_framework.h>
130
131 const int tcprexmtthresh = 3;
132
133 VNET_DEFINE(int, tcp_log_in_vain) = 0;
134 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
135 &VNET_NAME(tcp_log_in_vain), 0,
136 "Log all incoming TCP segments to closed ports");
137
138 VNET_DEFINE(int, blackhole) = 0;
139 #define V_blackhole VNET(blackhole)
140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
141 &VNET_NAME(blackhole), 0,
142 "Do not send RST on segments to closed ports");
143
144 VNET_DEFINE(bool, blackhole_local) = false;
145 #define V_blackhole_local VNET(blackhole_local)
146 SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, blackhole_local, CTLFLAG_VNET |
147 CTLFLAG_RW, &VNET_NAME(blackhole_local), false,
148 "Enforce net.inet.tcp.blackhole for locally originated packets");
149
150 VNET_DEFINE(int, tcp_delack_enabled) = 1;
151 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
152 &VNET_NAME(tcp_delack_enabled), 0,
153 "Delay ACK to try and piggyback it onto a data packet");
154
155 VNET_DEFINE(int, drop_synfin) = 0;
156 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
157 &VNET_NAME(drop_synfin), 0,
158 "Drop TCP packets with SYN+FIN set");
159
160 VNET_DEFINE(int, tcp_do_prr) = 1;
161 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr, CTLFLAG_VNET | CTLFLAG_RW,
162 &VNET_NAME(tcp_do_prr), 1,
163 "Enable Proportional Rate Reduction per RFC 6937");
164
165 VNET_DEFINE(int, tcp_do_newcwv) = 0;
166 SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
167 &VNET_NAME(tcp_do_newcwv), 0,
168 "Enable New Congestion Window Validation per RFC7661");
169
170 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
172 &VNET_NAME(tcp_do_rfc3042), 0,
173 "Enable RFC 3042 (Limited Transmit)");
174
175 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
176 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
177 &VNET_NAME(tcp_do_rfc3390), 0,
178 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
179
180 VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
181 SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
182 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
183 "Slow-start flight size (initial congestion window) in number of segments");
184
185 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
186 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
187 &VNET_NAME(tcp_do_rfc3465), 0,
188 "Enable RFC 3465 (Appropriate Byte Counting)");
189
190 VNET_DEFINE(int, tcp_abc_l_var) = 2;
191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
192 &VNET_NAME(tcp_abc_l_var), 2,
193 "Cap the max cwnd increment during slow-start to this number of segments");
194
195 VNET_DEFINE(int, tcp_insecure_syn) = 0;
196 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
197 &VNET_NAME(tcp_insecure_syn), 0,
198 "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");
199
200 VNET_DEFINE(int, tcp_insecure_rst) = 0;
201 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
202 &VNET_NAME(tcp_insecure_rst), 0,
203 "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");
204
205 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
206 #define V_tcp_recvspace VNET(tcp_recvspace)
207 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
208 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
209
210 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
211 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
212 &VNET_NAME(tcp_do_autorcvbuf), 0,
213 "Enable automatic receive buffer sizing");
214
215 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
216 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
217 &VNET_NAME(tcp_autorcvbuf_max), 0,
218 "Max size of automatic receive buffer");
219
220 VNET_DEFINE(struct inpcbinfo, tcbinfo);
221
222 /*
223  * TCP statistics are stored in an array of counter(9)s whose size matches
224  * that of struct tcpstat.  The TCP running connection count is a regular array.
225 */
226 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
227 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
228 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
229 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
230 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
231 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
232 "TCP connection counts by TCP state");
233
234 /*
235 * Kernel module interface for updating tcpstat. The first argument is an index
236 * into tcpstat treated as an array.
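 *
 * Callers normally go through the KMOD_TCPSTAT_INC() / KMOD_TCPSTAT_ADD()
 * macros in tcp_var.h, which map a struct tcpstat field name to the
 * counter index expected here.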
237 */
238 void
239 kmod_tcpstat_add(int statnum, int val)
240 {
241
242 counter_u64_add(VNET(tcpstat)[statnum], val);
243 }
244
245 /*
246 * Make sure that we only start a SACK loss recovery when
247 * receiving a duplicate ACK with a SACK block, and also
248 * complete SACK loss recovery in case the other end
249 * reneges.
250 */
251 static bool inline
252 tcp_is_sack_recovery(struct tcpcb *tp, struct tcpopt *to)
253 {
254 return ((tp->t_flags & TF_SACK_PERMIT) &&
255 ((to->to_flags & TOF_SACK) ||
256 (!TAILQ_EMPTY(&tp->snd_holes))));
257 }
258
259 #ifdef TCP_HHOOK
260 /*
261 * Wrapper for the TCP established input helper hook.
262 */
263 void
264 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
265 {
266 struct tcp_hhook_data hhook_data;
267
268 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
269 hhook_data.tp = tp;
270 hhook_data.th = th;
271 hhook_data.to = to;
272
273 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
274 &tp->t_osd);
275 }
276 }
277 #endif
278
279 /*
280 * CC wrapper hook functions
281 */
282 void
283 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
284 uint16_t type)
285 {
286 #ifdef STATS
287 int32_t gput;
288 #endif
289
290 INP_WLOCK_ASSERT(tptoinpcb(tp));
291
292 tp->t_ccv.nsegs = nsegs;
293 tp->t_ccv.bytes_this_ack = BYTES_THIS_ACK(tp, th);
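	/*
	 * Decide whether the sender is currently cwnd-limited.  Without
	 * RFC 7661 (newcwv), any cwnd not exceeding the send window counts
	 * as limited; with newcwv the connection must also have more than
	 * half a cwnd worth of data in the pipe, so that an
	 * application-limited sender does not keep growing a window it
	 * cannot use.
	 */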
294 if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
295 (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
296 (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
297 tp->t_ccv.flags |= CCF_CWND_LIMITED;
298 else
299 tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
300
301 if (type == CC_ACK) {
302 #ifdef STATS
303 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
304 ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
305 if (!IN_RECOVERY(tp->t_flags))
306 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
307 tp->t_ccv.bytes_this_ack / (tcp_maxseg(tp) * nsegs));
308 if ((tp->t_flags & TF_GPUTINPROG) &&
309 SEQ_GEQ(th->th_ack, tp->gput_ack)) {
310 /*
311 * Compute goodput in bits per millisecond.
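			 * For example, 1,250,000 bytes newly acked over
			 * 100 ms gives (1250000 << 3) / 100 = 100,000
			 * bits/ms, i.e. roughly 100 Mbit/s.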
312 */
313 gput = (((int64_t)SEQ_SUB(th->th_ack, tp->gput_seq)) << 3) /
314 max(1, tcp_ts_getticks() - tp->gput_ts);
315 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
316 gput);
317 /*
318 * XXXLAS: This is a temporary hack, and should be
319 * chained off VOI_TCP_GPUT when stats(9) grows an API
320 * to deal with chained VOIs.
321 */
322 if (tp->t_stats_gput_prev > 0)
323 stats_voi_update_abs_s32(tp->t_stats,
324 VOI_TCP_GPUT_ND,
325 ((gput - tp->t_stats_gput_prev) * 100) /
326 tp->t_stats_gput_prev);
327 tp->t_flags &= ~TF_GPUTINPROG;
328 tp->t_stats_gput_prev = gput;
329 }
330 #endif /* STATS */
331 if (tp->snd_cwnd > tp->snd_ssthresh) {
332 tp->t_bytes_acked += tp->t_ccv.bytes_this_ack;
333 if (tp->t_bytes_acked >= tp->snd_cwnd) {
334 tp->t_bytes_acked -= tp->snd_cwnd;
335 tp->t_ccv.flags |= CCF_ABC_SENTAWND;
336 }
337 } else {
338 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
339 tp->t_bytes_acked = 0;
340 }
341 }
342
343 if (CC_ALGO(tp)->ack_received != NULL) {
344 /* XXXLAS: Find a way to live without this */
345 tp->t_ccv.curack = th->th_ack;
346 CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
347 }
348 #ifdef STATS
349 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
350 #endif
351 }
352
353 void
354 cc_conn_init(struct tcpcb *tp)
355 {
356 struct hc_metrics_lite metrics;
357 struct inpcb *inp = tptoinpcb(tp);
358 u_int maxseg;
359 int rtt;
360
361 INP_WLOCK_ASSERT(inp);
362
363 tcp_hc_get(&inp->inp_inc, &metrics);
364 maxseg = tcp_maxseg(tp);
365
366 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
367 tp->t_srtt = rtt;
368 TCPSTAT_INC(tcps_usedrtt);
369 if (metrics.rmx_rttvar) {
370 tp->t_rttvar = metrics.rmx_rttvar;
371 TCPSTAT_INC(tcps_usedrttvar);
372 } else {
373 /* default variation is +- 1 rtt */
374 tp->t_rttvar =
375 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
376 }
377 TCPT_RANGESET(tp->t_rxtcur,
378 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
379 tp->t_rttmin, TCPTV_REXMTMAX);
380 }
381 if (metrics.rmx_ssthresh) {
382 /*
383 * There's some sort of gateway or interface
384 * buffer limit on the path. Use this to set
385 * the slow start threshold, but set the
386 * threshold to no less than 2*mss.
387 */
388 tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
389 TCPSTAT_INC(tcps_usedssthresh);
390 }
391
392 /*
393 * Set the initial slow-start flight size.
394 *
395 * If a SYN or SYN/ACK was lost and retransmitted, we have to
396  * reduce the initial CWND to one segment, as congestion is likely,
397  * requiring us to be cautious.
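	 *
	 * A sketch of the common case (the exact clamping lives in
	 * tcp_compute_initwnd()): with maxseg = 1460 and the default
	 * net.inet.tcp.initcwnd_segments = 10, the initial window is
	 * roughly 10 * 1460 = 14600 bytes.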
398 */
399 if (tp->snd_cwnd == 1)
400 tp->snd_cwnd = maxseg; /* SYN(-ACK) lost */
401 else
402 tp->snd_cwnd = tcp_compute_initwnd(maxseg);
403
404 if (CC_ALGO(tp)->conn_init != NULL)
405 CC_ALGO(tp)->conn_init(&tp->t_ccv);
406 }
407
408 void inline
409 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
410 {
411 INP_WLOCK_ASSERT(tptoinpcb(tp));
412
413 #ifdef STATS
414 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
415 #endif
416
417 switch(type) {
418 case CC_NDUPACK:
419 if (!IN_FASTRECOVERY(tp->t_flags)) {
420 tp->snd_recover = tp->snd_max;
421 if (tp->t_flags2 & TF2_ECN_PERMIT)
422 tp->t_flags2 |= TF2_ECN_SND_CWR;
423 }
424 break;
425 case CC_ECN:
426 if (!IN_CONGRECOVERY(tp->t_flags) ||
427 /*
428 * Allow ECN reaction on ACK to CWR, if
429 * that data segment was also CE marked.
430 */
431 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
432 EXIT_CONGRECOVERY(tp->t_flags);
433 TCPSTAT_INC(tcps_ecn_rcwnd);
434 tp->snd_recover = tp->snd_max + 1;
435 if (tp->t_flags2 & TF2_ECN_PERMIT)
436 tp->t_flags2 |= TF2_ECN_SND_CWR;
437 }
438 break;
439 case CC_RTO:
440 tp->t_dupacks = 0;
441 tp->t_bytes_acked = 0;
442 if ((tp->t_rxtshift > 1) ||
443 !((tp->t_flags & TF_SACK_PERMIT) &&
444 (!TAILQ_EMPTY(&tp->snd_holes))))
445 EXIT_RECOVERY(tp->t_flags);
446 if (tp->t_flags2 & TF2_ECN_PERMIT)
447 tp->t_flags2 |= TF2_ECN_SND_CWR;
448 break;
449 case CC_RTO_ERR:
450 TCPSTAT_INC(tcps_sndrexmitbad);
451 /* RTO was unnecessary, so reset everything. */
452 tp->snd_cwnd = tp->snd_cwnd_prev;
453 tp->snd_ssthresh = tp->snd_ssthresh_prev;
454 tp->snd_recover = tp->snd_recover_prev;
455 if (tp->t_flags & TF_WASFRECOVERY)
456 ENTER_FASTRECOVERY(tp->t_flags);
457 if (tp->t_flags & TF_WASCRECOVERY)
458 ENTER_CONGRECOVERY(tp->t_flags);
459 tp->snd_nxt = tp->snd_max;
460 tp->t_flags &= ~TF_PREVVALID;
461 tp->t_badrxtwin = 0;
462 break;
463 }
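	/*
	 * Keep the forward-most SACKed sequence number sane: if snd_fack
	 * has drifted outside [snd_una, snd_max], pull it back to snd_una.
	 */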
464 if (SEQ_LT(tp->snd_fack, tp->snd_una) ||
465 SEQ_GT(tp->snd_fack, tp->snd_max)) {
466 tp->snd_fack = tp->snd_una;
467 }
468
469 if (CC_ALGO(tp)->cong_signal != NULL) {
470 if (th != NULL)
471 tp->t_ccv.curack = th->th_ack;
472 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
473 }
474 }
475
476 void inline
477 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
478 {
479 INP_WLOCK_ASSERT(tptoinpcb(tp));
480
481 if (CC_ALGO(tp)->post_recovery != NULL) {
482 if (SEQ_LT(tp->snd_fack, th->th_ack) ||
483 SEQ_GT(tp->snd_fack, tp->snd_max)) {
484 tp->snd_fack = th->th_ack;
485 }
486 tp->t_ccv.curack = th->th_ack;
487 CC_ALGO(tp)->post_recovery(&tp->t_ccv);
488 }
489 EXIT_RECOVERY(tp->t_flags);
490
491 tp->t_bytes_acked = 0;
492 tp->sackhint.delivered_data = 0;
493 tp->sackhint.prr_delivered = 0;
494 tp->sackhint.prr_out = 0;
495 }
496
497 /*
498  * Indicate whether this ack should be delayed.  We can delay the ack if
499  * the following conditions are met:
500 * - There is no delayed ack timer in progress.
501 * - Our last ack wasn't a 0-sized window. We never want to delay
502 * the ack that opens up a 0-sized window.
503 * - LRO wasn't used for this segment. We make sure by checking that the
504 * segment size is not larger than the MSS.
505 */
506 #define DELAY_ACK(tp, tlen) \
507 ((!tcp_timer_active(tp, TT_DELACK) && \
508 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
509 (tlen <= tp->t_maxseg) && \
510 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
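/*
 * Example, assuming the default net.inet.tcp.delayed_ack=1: an in-order,
 * MSS-sized segment arriving with no delayed-ACK timer pending and no
 * zero window previously advertised satisfies DELAY_ACK(), so the ACK is
 * held back in the hope of piggybacking it onto response data.
 */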
511
512 void inline
513 cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
514 {
515 INP_WLOCK_ASSERT(tptoinpcb(tp));
516
517 if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
518 switch (iptos & IPTOS_ECN_MASK) {
519 case IPTOS_ECN_CE:
520 tp->t_ccv.flags |= CCF_IPHDR_CE;
521 break;
522 case IPTOS_ECN_ECT0:
523 /* FALLTHROUGH */
524 case IPTOS_ECN_ECT1:
525 /* FALLTHROUGH */
526 case IPTOS_ECN_NOTECT:
527 tp->t_ccv.flags &= ~CCF_IPHDR_CE;
528 break;
529 }
530
531 if (flags & TH_CWR)
532 tp->t_ccv.flags |= CCF_TCPHDR_CWR;
533 else
534 tp->t_ccv.flags &= ~CCF_TCPHDR_CWR;
535
536 CC_ALGO(tp)->ecnpkt_handler(&tp->t_ccv);
537
538 if (tp->t_ccv.flags & CCF_ACKNOW) {
539 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
540 tp->t_flags |= TF_ACKNOW;
541 }
542 }
543 }
544
545 void inline
546 cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
547 {
548 cc_ecnpkt_handler_flags(tp, tcp_get_flags(th), iptos);
549 }
550
551 /*
552 * TCP input handling is split into multiple parts:
553 * tcp6_input is a thin wrapper around tcp_input for the extended
554 * ip6_protox[] call format in ip6_input
555 * tcp_input handles primary segment validation, inpcb lookup and
556 * SYN processing on listen sockets
557 * tcp_do_segment processes the ACK and text of the segment for
558 * establishing, established and closing connections
559 */
560 #ifdef INET6
561 int
562 tcp6_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
563 {
564 struct mbuf *m;
565 struct in6_ifaddr *ia6;
566 struct ip6_hdr *ip6;
567
568 m = *mp;
569 if (m->m_len < *offp + sizeof(struct tcphdr)) {
570 m = m_pullup(m, *offp + sizeof(struct tcphdr));
571 if (m == NULL) {
572 *mp = m;
573 TCPSTAT_INC(tcps_rcvshort);
574 return (IPPROTO_DONE);
575 }
576 }
577
578 /*
579 * draft-itojun-ipv6-tcp-to-anycast
580  * Is there a better place to put this?
581 */
582 ip6 = mtod(m, struct ip6_hdr *);
583 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
584 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
585 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
586 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
587 *mp = NULL;
588 return (IPPROTO_DONE);
589 }
590
591 *mp = m;
592 return (tcp_input_with_port(mp, offp, proto, port));
593 }
594
595 int
596 tcp6_input(struct mbuf **mp, int *offp, int proto)
597 {
598
599 return(tcp6_input_with_port(mp, offp, proto, 0));
600 }
601 #endif /* INET6 */
602
603 int
604 tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
605 {
606 struct mbuf *m = *mp;
607 struct tcphdr *th = NULL;
608 struct ip *ip = NULL;
609 struct inpcb *inp = NULL;
610 struct tcpcb *tp = NULL;
611 struct socket *so = NULL;
612 u_char *optp = NULL;
613 int off0;
614 int optlen = 0;
615 #ifdef INET
616 int len;
617 uint8_t ipttl;
618 #endif
619 int tlen = 0, off;
620 int drop_hdrlen;
621 int thflags;
622 int rstreason = 0; /* For badport_bandlim accounting purposes */
623 int lookupflag;
624 uint8_t iptos;
625 struct m_tag *fwd_tag = NULL;
626 #ifdef INET6
627 struct ip6_hdr *ip6 = NULL;
628 int isipv6;
629 #else
630 const void *ip6 = NULL;
631 #endif /* INET6 */
632 struct tcpopt to; /* options in this segment */
633 char *s = NULL; /* address and port logging */
634
635 NET_EPOCH_ASSERT();
636
637 #ifdef INET6
638 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
639 #endif
640
641 off0 = *offp;
642 m = *mp;
643 *mp = NULL;
644 to.to_flags = 0;
645 TCPSTAT_INC(tcps_rcvtotal);
646
647 m->m_pkthdr.tcp_tun_port = port;
648 #ifdef INET6
649 if (isipv6) {
650 ip6 = mtod(m, struct ip6_hdr *);
651 th = (struct tcphdr *)((caddr_t)ip6 + off0);
652 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
653 if (port)
654 goto skip6_csum;
655 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
656 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
657 th->th_sum = m->m_pkthdr.csum_data;
658 else
659 th->th_sum = in6_cksum_pseudo(ip6, tlen,
660 IPPROTO_TCP, m->m_pkthdr.csum_data);
661 th->th_sum ^= 0xffff;
662 } else
663 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
664 if (th->th_sum) {
665 TCPSTAT_INC(tcps_rcvbadsum);
666 goto drop;
667 }
668 skip6_csum:
669 /*
670 		 * Be proactive about an unspecified IPv6 address in the source.
671 		 * As we use all-zero to indicate an unbound/unconnected pcb,
672 		 * an unspecified IPv6 address can be used to confuse us.
673 		 *
674 		 * Note that packets with an unspecified IPv6 destination are
675 		 * already dropped in ip6_input.
676 */
677 KASSERT(!IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst),
678 ("%s: unspecified destination v6 address", __func__));
679 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
680 IP6STAT_INC(ip6s_badscope); /* XXX */
681 goto drop;
682 }
683 iptos = IPV6_TRAFFIC_CLASS(ip6);
684 }
685 #endif
686 #if defined(INET) && defined(INET6)
687 else
688 #endif
689 #ifdef INET
690 {
691 /*
692 * Get IP and TCP header together in first mbuf.
693 * Note: IP leaves IP header in first mbuf.
694 */
695 if (off0 > sizeof (struct ip)) {
696 ip_stripoptions(m);
697 off0 = sizeof(struct ip);
698 }
699 if (m->m_len < sizeof (struct tcpiphdr)) {
700 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
701 == NULL) {
702 TCPSTAT_INC(tcps_rcvshort);
703 return (IPPROTO_DONE);
704 }
705 }
706 ip = mtod(m, struct ip *);
707 th = (struct tcphdr *)((caddr_t)ip + off0);
708 tlen = ntohs(ip->ip_len) - off0;
709
710 iptos = ip->ip_tos;
711 if (port)
712 goto skip_csum;
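		/*
		 * Validate the TCP checksum.  When the driver has verified
		 * it in hardware (CSUM_DATA_VALID), csum_data either holds
		 * the final checksum (CSUM_PSEUDO_HDR) or still needs the
		 * pseudo-header folded in; otherwise the checksum is
		 * computed in software over the whole segment.
		 */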
713 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
714 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
715 th->th_sum = m->m_pkthdr.csum_data;
716 else
717 th->th_sum = in_pseudo(ip->ip_src.s_addr,
718 ip->ip_dst.s_addr,
719 htonl(m->m_pkthdr.csum_data + tlen +
720 IPPROTO_TCP));
721 th->th_sum ^= 0xffff;
722 } else {
723 struct ipovly *ipov = (struct ipovly *)ip;
724
725 /*
726 * Checksum extended TCP header and data.
727 */
728 len = off0 + tlen;
729 ipttl = ip->ip_ttl;
730 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
731 ipov->ih_len = htons(tlen);
732 th->th_sum = in_cksum(m, len);
733 /* Reset length for SDT probes. */
734 ip->ip_len = htons(len);
735 /* Reset TOS bits */
736 ip->ip_tos = iptos;
737 /* Re-initialization for later version check */
738 ip->ip_ttl = ipttl;
739 ip->ip_v = IPVERSION;
740 ip->ip_hl = off0 >> 2;
741 }
742 skip_csum:
743 if (th->th_sum && (port == 0)) {
744 TCPSTAT_INC(tcps_rcvbadsum);
745 goto drop;
746 }
747 KASSERT(ip->ip_dst.s_addr != INADDR_ANY,
748 ("%s: unspecified destination v4 address", __func__));
749 if (__predict_false(ip->ip_src.s_addr == INADDR_ANY)) {
750 IPSTAT_INC(ips_badaddr);
751 goto drop;
752 }
753 }
754 #endif /* INET */
755
756 /*
757 * Check that TCP offset makes sense,
758 * pull out TCP options and adjust length. XXX
759 */
760 off = th->th_off << 2;
761 if (off < sizeof (struct tcphdr) || off > tlen) {
762 TCPSTAT_INC(tcps_rcvbadoff);
763 goto drop;
764 }
765 tlen -= off; /* tlen is used instead of ti->ti_len */
766 if (off > sizeof (struct tcphdr)) {
767 #ifdef INET6
768 if (isipv6) {
769 if (m->m_len < off0 + off) {
770 m = m_pullup(m, off0 + off);
771 if (m == NULL) {
772 TCPSTAT_INC(tcps_rcvshort);
773 return (IPPROTO_DONE);
774 }
775 }
776 ip6 = mtod(m, struct ip6_hdr *);
777 th = (struct tcphdr *)((caddr_t)ip6 + off0);
778 }
779 #endif
780 #if defined(INET) && defined(INET6)
781 else
782 #endif
783 #ifdef INET
784 {
785 if (m->m_len < sizeof(struct ip) + off) {
786 if ((m = m_pullup(m, sizeof (struct ip) + off))
787 == NULL) {
788 TCPSTAT_INC(tcps_rcvshort);
789 return (IPPROTO_DONE);
790 }
791 ip = mtod(m, struct ip *);
792 th = (struct tcphdr *)((caddr_t)ip + off0);
793 }
794 }
795 #endif
796 optlen = off - sizeof (struct tcphdr);
797 optp = (u_char *)(th + 1);
798 }
799 thflags = tcp_get_flags(th);
800
801 /*
802 * Convert TCP protocol specific fields to host format.
803 */
804 tcp_fields_to_host(th);
805
806 /*
807 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
808 */
809 drop_hdrlen = off0 + off;
810
811 /*
812 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
813 */
814 if (
815 #ifdef INET6
816 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
817 #ifdef INET
818 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
819 #endif
820 #endif
821 #if defined(INET) && !defined(INET6)
822 (m->m_flags & M_IP_NEXTHOP)
823 #endif
824 )
825 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
826
827 /*
828 	 * For initial SYN packets we don't need a write lock on the matching
829 	 * PCB, be it a listening one or a synchronized one.  The packet
830 * shall not modify its state.
831 */
832 lookupflag = INPLOOKUP_WILDCARD |
833 ((thflags & (TH_ACK|TH_SYN)) == TH_SYN ?
834 INPLOOKUP_RLOCKPCB : INPLOOKUP_WLOCKPCB);
835 findpcb:
836 #ifdef INET6
837 if (isipv6 && fwd_tag != NULL) {
838 struct sockaddr_in6 *next_hop6;
839
840 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
841 /*
842 * Transparently forwarded. Pretend to be the destination.
843 * Already got one like this?
844 */
845 inp = in6_pcblookup_mbuf(&V_tcbinfo,
846 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
847 lookupflag & ~INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif, m);
848 if (!inp) {
849 /*
850 * It's new. Try to find the ambushing socket.
851 * Because we've rewritten the destination address,
852 * any hardware-generated hash is ignored.
853 */
854 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
855 th->th_sport, &next_hop6->sin6_addr,
856 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
857 th->th_dport, lookupflag, m->m_pkthdr.rcvif);
858 }
859 } else if (isipv6) {
860 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
861 th->th_sport, &ip6->ip6_dst, th->th_dport, lookupflag,
862 m->m_pkthdr.rcvif, m);
863 }
864 #endif /* INET6 */
865 #if defined(INET6) && defined(INET)
866 else
867 #endif
868 #ifdef INET
869 if (fwd_tag != NULL) {
870 struct sockaddr_in *next_hop;
871
872 next_hop = (struct sockaddr_in *)(fwd_tag+1);
873 /*
874 * Transparently forwarded. Pretend to be the destination.
875 		 * Already got one like this?
876 */
877 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
878 ip->ip_dst, th->th_dport, lookupflag & ~INPLOOKUP_WILDCARD,
879 m->m_pkthdr.rcvif, m);
880 if (!inp) {
881 /*
882 * It's new. Try to find the ambushing socket.
883 * Because we've rewritten the destination address,
884 * any hardware-generated hash is ignored.
885 */
886 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
887 th->th_sport, next_hop->sin_addr,
888 next_hop->sin_port ? ntohs(next_hop->sin_port) :
889 th->th_dport, lookupflag, m->m_pkthdr.rcvif);
890 }
891 } else
892 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
893 th->th_sport, ip->ip_dst, th->th_dport, lookupflag,
894 m->m_pkthdr.rcvif, m);
895 #endif /* INET */
896
897 /*
898 * If the INPCB does not exist then all data in the incoming
899 * segment is discarded and an appropriate RST is sent back.
900 * XXX MRT Send RST using which routing table?
901 */
902 if (inp == NULL) {
903 if (rstreason != 0) {
904 /* We came here after second (safety) lookup. */
905 MPASS((lookupflag & INPLOOKUP_WILDCARD) == 0);
906 goto dropwithreset;
907 }
908 /*
909 * Log communication attempts to ports that are not
910 * in use.
911 */
912 if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
913 V_tcp_log_in_vain == 2) {
914 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
915 log(LOG_INFO, "%s; %s: Connection attempt "
916 "to closed port\n", s, __func__);
917 }
918 /*
919 * When blackholing do not respond with a RST but
920 * completely ignore the segment and drop it.
921 */
922 if (((V_blackhole == 1 && (thflags & TH_SYN)) ||
923 V_blackhole == 2) && (V_blackhole_local || (
924 #ifdef INET6
925 isipv6 ? !in6_localaddr(&ip6->ip6_src) :
926 #endif
927 #ifdef INET
928 !in_localip(ip->ip_src)
929 #else
930 true
931 #endif
932 )))
933 goto dropunlock;
934
935 rstreason = BANDLIM_RST_CLOSEDPORT;
936 goto dropwithreset;
937 }
938 INP_LOCK_ASSERT(inp);
939
940 if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
941 !SOLISTENING(inp->inp_socket)) {
942 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
943 inp->inp_flowid = m->m_pkthdr.flowid;
944 inp->inp_flowtype = M_HASHTYPE_GET(m);
945 #ifdef RSS
946 } else {
947 /* assign flowid by software RSS hash */
948 #ifdef INET6
949 if (isipv6) {
950 rss_proto_software_hash_v6(&inp->in6p_faddr,
951 &inp->in6p_laddr,
952 inp->inp_fport,
953 inp->inp_lport,
954 IPPROTO_TCP,
955 &inp->inp_flowid,
956 &inp->inp_flowtype);
957 } else
958 #endif /* INET6 */
959 {
960 rss_proto_software_hash_v4(inp->inp_faddr,
961 inp->inp_laddr,
962 inp->inp_fport,
963 inp->inp_lport,
964 IPPROTO_TCP,
965 &inp->inp_flowid,
966 &inp->inp_flowtype);
967 }
968 #endif /* RSS */
969 }
970 }
971 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
972 #ifdef INET6
973 if (isipv6 && IPSEC_ENABLED(ipv6) &&
974 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
975 goto dropunlock;
976 }
977 #ifdef INET
978 else
979 #endif
980 #endif /* INET6 */
981 #ifdef INET
982 if (IPSEC_ENABLED(ipv4) &&
983 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
984 goto dropunlock;
985 }
986 #endif /* INET */
987 #endif /* IPSEC */
988
989 /*
990 * Check the minimum TTL for socket.
991 */
992 if (inp->inp_ip_minttl != 0) {
993 #ifdef INET6
994 if (isipv6) {
995 if (inp->inp_ip_minttl > ip6->ip6_hlim)
996 goto dropunlock;
997 } else
998 #endif
999 if (inp->inp_ip_minttl > ip->ip_ttl)
1000 goto dropunlock;
1001 }
1002
1003 tp = intotcpcb(inp);
1004 switch (tp->t_state) {
1005 case TCPS_TIME_WAIT:
1006 /*
1007 * A previous connection in TIMEWAIT state is supposed to catch
1008 * stray or duplicate segments arriving late. If this segment
1009 * was a legitimate new connection attempt, the old INPCB gets
1010 * removed and we can try again to find a listening socket.
1011 */
1012 tcp_dooptions(&to, optp, optlen,
1013 (thflags & TH_SYN) ? TO_SYN : 0);
1014 /*
1015 		 * tcp_twcheck() always unlocks the inp and frees the mbuf on failure.
1016 */
1017 if (tcp_twcheck(inp, &to, th, m, tlen))
1018 goto findpcb;
1019 return (IPPROTO_DONE);
1020 case TCPS_CLOSED:
1021 /*
1022 * The TCPCB may no longer exist if the connection is winding
1023 * down or it is in the CLOSED state. Either way we drop the
1024 * segment and send an appropriate response.
1025 */
1026 rstreason = BANDLIM_RST_CLOSEDPORT;
1027 goto dropwithreset;
1028 }
1029
1030 if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
1031 rstreason = BANDLIM_RST_CLOSEDPORT;
1032 goto dropwithreset;
1033 }
1034
1035 #ifdef TCP_OFFLOAD
1036 if (tp->t_flags & TF_TOE) {
1037 tcp_offload_input(tp, m);
1038 m = NULL; /* consumed by the TOE driver */
1039 goto dropunlock;
1040 }
1041 #endif
1042
1043 #ifdef MAC
1044 if (mac_inpcb_check_deliver(inp, m))
1045 goto dropunlock;
1046 #endif
1047 so = inp->inp_socket;
1048 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1049 /*
1050 * When the socket is accepting connections (the INPCB is in LISTEN
1051 * state) we look into the SYN cache if this is a new connection
1052 * attempt or the completion of a previous one.
1053 */
1054 KASSERT(tp->t_state == TCPS_LISTEN || !SOLISTENING(so),
1055 ("%s: so accepting but tp %p not listening", __func__, tp));
1056 if (tp->t_state == TCPS_LISTEN && SOLISTENING(so)) {
1057 struct in_conninfo inc;
1058
1059 bzero(&inc, sizeof(inc));
1060 #ifdef INET6
1061 if (isipv6) {
1062 inc.inc_flags |= INC_ISIPV6;
1063 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
1064 inc.inc_flags |= INC_IPV6MINMTU;
1065 inc.inc6_faddr = ip6->ip6_src;
1066 inc.inc6_laddr = ip6->ip6_dst;
1067 } else
1068 #endif
1069 {
1070 inc.inc_faddr = ip->ip_src;
1071 inc.inc_laddr = ip->ip_dst;
1072 }
1073 inc.inc_fport = th->th_sport;
1074 inc.inc_lport = th->th_dport;
1075 inc.inc_fibnum = so->so_fibnum;
1076
1077 /*
1078 		 * Check for an existing connection attempt in the syncache if
1079 		 * the only flag set is ACK.  A successful lookup creates a new
1080 * socket appended to the listen queue in SYN_RECEIVED state.
1081 */
1082 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1083 /*
1084 * Parse the TCP options here because
1085 * syncookies need access to the reflected
1086 * timestamp.
1087 */
1088 tcp_dooptions(&to, optp, optlen, 0);
1089 /*
1090 * NB: syncache_expand() doesn't unlock inp.
1091 */
1092 rstreason = syncache_expand(&inc, &to, th, &so, m, port);
1093 if (rstreason < 0) {
1094 /*
1095 * A failing TCP MD5 signature comparison
1096 * must result in the segment being dropped
1097 * and must not produce any response back
1098 * to the sender.
1099 */
1100 goto dropunlock;
1101 } else if (rstreason == 0) {
1102 /*
1103 * No syncache entry, or ACK was not for our
1104 				 * SYN/ACK.  Apply our protection against a double
1105 				 * ACK.  If the peer sent us 2 ACKs, then for the
1106 				 * first one syncache_expand() successfully
1107 				 * converted the syncache entry into a socket,
1108 				 * while we were waiting on the inpcb lock.  We
1109 				 * don't want to send a RST for the second ACK,
1110 * so we perform second lookup without wildcard
1111 * match, hoping to find the new socket. If
1112 * the ACK is stray indeed, rstreason would
1113 * hint the above code that the lookup was a
1114 * second attempt.
1115 *
1116 * NB: syncache did its own logging
1117 * of the failure cause.
1118 */
1119 INP_WUNLOCK(inp);
1120 rstreason = BANDLIM_RST_OPENPORT;
1121 lookupflag &= ~INPLOOKUP_WILDCARD;
1122 goto findpcb;
1123 }
1124 tfo_socket_result:
1125 if (so == NULL) {
1126 /*
1127 * We completed the 3-way handshake
1128 * but could not allocate a socket
1129 * either due to memory shortage,
1130 * listen queue length limits or
1131 * global socket limits. Send RST
1132 * or wait and have the remote end
1133 * retransmit the ACK for another
1134 * try.
1135 */
1136 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1137 log(LOG_DEBUG, "%s; %s: Listen socket: "
1138 "Socket allocation failed due to "
1139 "limits or memory shortage, %s\n",
1140 s, __func__,
1141 V_tcp_sc_rst_sock_fail ?
1142 "sending RST" : "try again");
1143 if (V_tcp_sc_rst_sock_fail) {
1144 rstreason = BANDLIM_UNLIMITED;
1145 goto dropwithreset;
1146 } else
1147 goto dropunlock;
1148 }
1149 /*
1150 * Socket is created in state SYN_RECEIVED.
1151 * Unlock the listen socket, lock the newly
1152 * created socket and update the tp variable.
1153 			 * If we came here via the jump to tfo_socket_result,
1154 			 * then the listening socket is read-locked.
1155 */
1156 INP_UNLOCK(inp); /* listen socket */
1157 inp = sotoinpcb(so);
1158 /*
1159 * New connection inpcb is already locked by
1160 * syncache_expand().
1161 */
1162 INP_WLOCK_ASSERT(inp);
1163 tp = intotcpcb(inp);
1164 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1165 ("%s: ", __func__));
1166 /*
1167 * Process the segment and the data it
1168 * contains. tcp_do_segment() consumes
1169 * the mbuf chain and unlocks the inpcb.
1170 */
1171 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1172 tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen,
1173 tlen, iptos);
1174 return (IPPROTO_DONE);
1175 }
1176 /*
1177 * Segment flag validation for new connection attempts:
1178 *
1179 * Our (SYN|ACK) response was rejected.
1180 * Check with syncache and remove entry to prevent
1181 * retransmits.
1182 *
1183 * NB: syncache_chkrst does its own logging of failure
1184 * causes.
1185 */
1186 if (thflags & TH_RST) {
1187 syncache_chkrst(&inc, th, m, port);
1188 goto dropunlock;
1189 }
1190 /*
1191 * We can't do anything without SYN.
1192 */
1193 if ((thflags & TH_SYN) == 0) {
1194 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1195 log(LOG_DEBUG, "%s; %s: Listen socket: "
1196 "SYN is missing, segment ignored\n",
1197 s, __func__);
1198 TCPSTAT_INC(tcps_badsyn);
1199 goto dropunlock;
1200 }
1201 /*
1202 * (SYN|ACK) is bogus on a listen socket.
1203 */
1204 if (thflags & TH_ACK) {
1205 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1206 log(LOG_DEBUG, "%s; %s: Listen socket: "
1207 "SYN|ACK invalid, segment rejected\n",
1208 s, __func__);
1209 syncache_badack(&inc, port); /* XXX: Not needed! */
1210 TCPSTAT_INC(tcps_badsyn);
1211 rstreason = BANDLIM_RST_OPENPORT;
1212 goto dropwithreset;
1213 }
1214 /*
1215 * If the drop_synfin option is enabled, drop all
1216 * segments with both the SYN and FIN bits set.
1217 * This prevents e.g. nmap from identifying the
1218 * TCP/IP stack.
1219 * XXX: Poor reasoning. nmap has other methods
1220 * and is constantly refining its stack detection
1221 * strategies.
1222 * XXX: This is a violation of the TCP specification
1223 * and was used by RFC1644.
1224 */
1225 if ((thflags & TH_FIN) && V_drop_synfin) {
1226 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1227 log(LOG_DEBUG, "%s; %s: Listen socket: "
1228 "SYN|FIN segment ignored (based on "
1229 "sysctl setting)\n", s, __func__);
1230 TCPSTAT_INC(tcps_badsyn);
1231 goto dropunlock;
1232 }
1233 /*
1234 * Segment's flags are (SYN) or (SYN|FIN).
1235 *
1236 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1237 * as they do not affect the state of the TCP FSM.
1238 * The data pointed to by TH_URG and th_urp is ignored.
1239 */
1240 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1241 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1242 KASSERT(thflags & (TH_SYN),
1243 ("%s: Listen socket: TH_SYN not set", __func__));
1244 INP_RLOCK_ASSERT(inp);
1245 #ifdef INET6
1246 /*
1247 * If deprecated address is forbidden,
1248 * we do not accept SYN to deprecated interface
1249 * address to prevent any new inbound connection from
1250 * getting established.
1251 * When we do not accept SYN, we send a TCP RST,
1252 * with deprecated source address (instead of dropping
1253 		 * it).  This is a compromise, as it is much better for the peer
1254 		 * to get a RST, and the RST will be the final packet
1255 		 * of the exchange.
1256 *
1257 * If we do not forbid deprecated addresses, we accept
1258 * the SYN packet. RFC2462 does not suggest dropping
1259 * SYN in this case.
1260 		 * Deciphering RFC2462 5.5.4, it says the following:
1261 * 1. use of deprecated addr with existing
1262 * communication is okay - "SHOULD continue to be
1263 * used"
1264 * 2. use of it with new communication:
1265 * (2a) "SHOULD NOT be used if alternate address
1266 * with sufficient scope is available"
1267 * (2b) nothing mentioned otherwise.
1268 * Here we fall into (2b) case as we have no choice in
1269 * our source address selection - we must obey the peer.
1270 *
1271 * The wording in RFC2462 is confusing, and there are
1272 		 * multiple descriptions of deprecated address
1273 * handling - worse, they are not exactly the same.
1274 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1275 */
1276 if (isipv6 && !V_ip6_use_deprecated) {
1277 struct in6_ifaddr *ia6;
1278
1279 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1280 if (ia6 != NULL &&
1281 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1282 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1283 log(LOG_DEBUG, "%s; %s: Listen socket: "
1284 "Connection attempt to deprecated "
1285 "IPv6 address rejected\n",
1286 s, __func__);
1287 rstreason = BANDLIM_RST_OPENPORT;
1288 goto dropwithreset;
1289 }
1290 }
1291 #endif /* INET6 */
1292 /*
1293 * Basic sanity checks on incoming SYN requests:
1294 * Don't respond if the destination is a link layer
1295 * broadcast according to RFC1122 4.2.3.10, p. 104.
1296 * If it is from this socket it must be forged.
1297 * Don't respond if the source or destination is a
1298 * global or subnet broad- or multicast address.
1299 * Note that it is quite possible to receive unicast
1300 * link-layer packets with a broadcast IP address. Use
1301 * in_broadcast() to find them.
1302 */
1303 if (m->m_flags & (M_BCAST|M_MCAST)) {
1304 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1305 log(LOG_DEBUG, "%s; %s: Listen socket: "
1306 "Connection attempt from broad- or multicast "
1307 "link layer address ignored\n", s, __func__);
1308 goto dropunlock;
1309 }
1310 #ifdef INET6
1311 if (isipv6) {
1312 if (th->th_dport == th->th_sport &&
1313 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1314 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1315 log(LOG_DEBUG, "%s; %s: Listen socket: "
1316 "Connection attempt to/from self "
1317 "ignored\n", s, __func__);
1318 goto dropunlock;
1319 }
1320 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1321 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1322 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1323 log(LOG_DEBUG, "%s; %s: Listen socket: "
1324 "Connection attempt from/to multicast "
1325 "address ignored\n", s, __func__);
1326 goto dropunlock;
1327 }
1328 }
1329 #endif
1330 #if defined(INET) && defined(INET6)
1331 else
1332 #endif
1333 #ifdef INET
1334 {
1335 if (th->th_dport == th->th_sport &&
1336 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1337 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1338 log(LOG_DEBUG, "%s; %s: Listen socket: "
1339 "Connection attempt from/to self "
1340 "ignored\n", s, __func__);
1341 goto dropunlock;
1342 }
1343 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1344 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1345 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1346 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1347 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1348 log(LOG_DEBUG, "%s; %s: Listen socket: "
1349 "Connection attempt from/to broad- "
1350 "or multicast address ignored\n",
1351 s, __func__);
1352 goto dropunlock;
1353 }
1354 }
1355 #endif
1356 /*
1357 * SYN appears to be valid. Create compressed TCP state
1358 * for syncache.
1359 */
1360 TCP_PROBE3(debug__input, tp, th, m);
1361 tcp_dooptions(&to, optp, optlen, TO_SYN);
1362 if ((so = syncache_add(&inc, &to, th, inp, so, m, NULL, NULL,
1363 iptos, port)) != NULL)
1364 goto tfo_socket_result;
1365
1366 /*
1367 * Entry added to syncache and mbuf consumed.
1368 * Only the listen socket is unlocked by syncache_add().
1369 */
1370 return (IPPROTO_DONE);
1371 } else if (tp->t_state == TCPS_LISTEN) {
1372 /*
1373 * When a listen socket is torn down the SO_ACCEPTCONN
1374 * flag is removed first while connections are drained
1375 		 * from the accept queue in an unlock/lock cycle of the
1376 		 * ACCEPT_LOCK, opening a race condition that allows a SYN
1377 		 * attempt to go through unhandled.
1378 */
1379 goto dropunlock;
1380 }
1381 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1382 if (tp->t_flags & TF_SIGNATURE) {
1383 tcp_dooptions(&to, optp, optlen, thflags);
1384 if ((to.to_flags & TOF_SIGNATURE) == 0) {
1385 TCPSTAT_INC(tcps_sig_err_nosigopt);
1386 goto dropunlock;
1387 }
1388 if (!TCPMD5_ENABLED() ||
1389 TCPMD5_INPUT(m, th, to.to_signature) != 0)
1390 goto dropunlock;
1391 }
1392 #endif
1393 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1394
1395 /*
1396 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1397 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1398 * the inpcb, and unlocks pcbinfo.
1399 *
1400 	 * XXXGL: in case of a pure SYN arriving on an existing connection
1401 	 * TCP stacks won't need to modify the PCB; they would either drop
1402 * the segment silently, or send a challenge ACK. However, we try
1403 * to upgrade the lock, because calling convention for stacks is
1404 * write-lock on PCB. If upgrade fails, drop the SYN.
1405 */
1406 if ((lookupflag & INPLOOKUP_RLOCKPCB) && INP_TRY_UPGRADE(inp) == 0)
1407 goto dropunlock;
1408
1409 tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen, tlen, iptos);
1410 return (IPPROTO_DONE);
1411
1412 dropwithreset:
1413 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1414
1415 if (inp != NULL) {
1416 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1417 INP_UNLOCK(inp);
1418 } else
1419 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1420 m = NULL; /* mbuf chain got consumed. */
1421 goto drop;
1422
1423 dropunlock:
1424 if (m != NULL)
1425 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1426
1427 if (inp != NULL)
1428 INP_UNLOCK(inp);
1429
1430 drop:
1431 if (s != NULL)
1432 free(s, M_TCPLOG);
1433 if (m != NULL)
1434 m_freem(m);
1435 return (IPPROTO_DONE);
1436 }
1437
1438 /*
1439 * Automatic sizing of receive socket buffer. Often the send
1440 * buffer size is not optimally adjusted to the actual network
1441 * conditions at hand (delay bandwidth product). Setting the
1442 * buffer size too small limits throughput on links with high
1443  * bandwidth and high delay (e.g. trans-continental/oceanic links).
1444 *
1445 * On the receive side the socket buffer memory is only rarely
1446 * used to any significant extent. This allows us to be much
1447 * more aggressive in scaling the receive socket buffer. For
1448 * the case that the buffer space is actually used to a large
1449 * extent and we run out of kernel memory we can simply drop
1450  * the new segments; TCP on the sender will just retransmit them
1451 * later. Setting the buffer size too big may only consume too
1452 * much kernel memory if the application doesn't read() from
1453 * the socket or packet loss or reordering makes use of the
1454 * reassembly queue.
1455 *
1456 * The criteria to step up the receive buffer one notch are:
1457 * 1. Application has not set receive buffer size with
1458 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
1459  *    2. The number of bytes received during 1/2 of an sRTT
1460  *       is at least 3/8 of the current socket buffer size.
1461  *    3. The receive buffer size has not hit the maximal automatic size.
1462 *
1463  * If all of the criteria are met we increase the socket buffer
1464  * by half (bounded by the max).  This allows us to keep ahead
1465  * of slow-start but also keeps our peer from being limited
1466  * by our rwnd, which we would then have to open up, causing a burst.
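 *
 * Worked example under the defaults: with sb_hiwat = 65536 bytes the
 * step-up trigger is 3/8 * 65536 = 24576 bytes received within half an
 * sRTT, after which the buffer grows to min(98304, recvbuf_max) bytes.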
1467 *
1468 * This algorithm does two steps per RTT at most and only if
1469 * we receive a bulk stream w/o packet losses or reorderings.
1470 * Shrinking the buffer during idle times is not necessary as
1471 * it doesn't consume any memory when idle.
1472 *
1473 * TODO: Only step up if the application is actually serving
1474 * the buffer to better manage the socket buffer resources.
1475 */
1476 int
1477 tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
1478 struct tcpcb *tp, int tlen)
1479 {
1480 int newsize = 0;
1481
1482 if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
1483 tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
1484 TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
1485 ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
1486 if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
1487 so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
1488 newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
1489 }
1490 TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);
1491
1492 /* Start over with next RTT. */
1493 tp->rfbuf_ts = 0;
1494 tp->rfbuf_cnt = 0;
1495 } else {
1496 tp->rfbuf_cnt += tlen; /* add up */
1497 }
1498 return (newsize);
1499 }
1500
1501 int
1502 tcp_input(struct mbuf **mp, int *offp, int proto)
1503 {
1504 return(tcp_input_with_port(mp, offp, proto, 0));
1505 }
1506
1507 static void
1508 tcp_handle_wakeup(struct tcpcb *tp)
1509 {
1510
1511 INP_WLOCK_ASSERT(tptoinpcb(tp));
1512
1513 if (tp->t_flags & TF_WAKESOR) {
1514 struct socket *so = tptosocket(tp);
1515
1516 tp->t_flags &= ~TF_WAKESOR;
1517 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1518 sorwakeup_locked(so);
1519 }
1520 }
1521
1522 void
1523 tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
1524 int drop_hdrlen, int tlen, uint8_t iptos)
1525 {
1526 uint16_t thflags;
1527 int acked, ourfinisacked, needoutput = 0;
1528 sackstatus_t sack_changed;
1529 int rstreason, todrop, win, incforsyn = 0;
1530 uint32_t tiwin;
1531 uint16_t nsegs;
1532 char *s;
1533 struct inpcb *inp = tptoinpcb(tp);
1534 struct socket *so = tptosocket(tp);
1535 struct in_conninfo *inc = &inp->inp_inc;
1536 struct mbuf *mfree;
1537 struct tcpopt to;
1538 int tfo_syn;
1539 u_int maxseg = 0;
1540
1541 thflags = tcp_get_flags(th);
1542 tp->sackhint.last_sack_ack = 0;
1543 sack_changed = SACK_NOCHANGE;
1544 nsegs = max(1, m->m_pkthdr.lro_nsegs);
1545
1546 NET_EPOCH_ASSERT();
1547 INP_WLOCK_ASSERT(inp);
1548 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1549 __func__));
1550 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1551 __func__));
1552
1553 #ifdef TCPPCAP
1554 /* Save segment, if requested. */
1555 tcp_pcap_add(th, m, &(tp->t_inpkts));
1556 #endif
1557 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
1558 tlen, NULL, true);
1559
1560 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
1561 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1562 log(LOG_DEBUG, "%s; %s: "
1563 "SYN|FIN segment ignored (based on "
1564 "sysctl setting)\n", s, __func__);
1565 free(s, M_TCPLOG);
1566 }
1567 goto drop;
1568 }
1569
1570 /*
1571 	 * If a segment with the ACK-bit set arrives in the SYN-SENT state,
1572 	 * check SEQ.ACK first.
1573 */
1574 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
1575 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
1576 rstreason = BANDLIM_UNLIMITED;
1577 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1578 goto dropwithreset;
1579 }
1580
1581 /*
1582 * Segment received on connection.
1583 * Reset idle time and keep-alive timer.
1584 * XXX: This should be done after segment
1585 * validation to ignore broken/spoofed segs.
1586 */
1587 if (tp->t_idle_reduce &&
1588 (tp->snd_max == tp->snd_una) &&
1589 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
1590 cc_after_idle(tp);
1591 tp->t_rcvtime = ticks;
1592
1593 if (thflags & TH_FIN)
1594 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
1595 /*
1596 * Scale up the window into a 32-bit value.
1597 * For the SYN_SENT state the scale is zero.
1598 */
1599 tiwin = th->th_win << tp->snd_scale;
1600 #ifdef STATS
1601 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
1602 #endif
1603
1604 /*
1605 * TCP ECN processing.
1606 */
1607 if (tcp_ecn_input_segment(tp, thflags, tlen,
1608 tcp_packets_this_ack(tp, th->th_ack),
1609 iptos))
1610 cc_cong_signal(tp, th, CC_ECN);
1611
1612 /*
1613 * Parse options on any incoming segment.
1614 */
1615 tcp_dooptions(&to, (u_char *)(th + 1),
1616 (th->th_off << 2) - sizeof(struct tcphdr),
1617 (thflags & TH_SYN) ? TO_SYN : 0);
1618 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
1619 /*
1620 		 * We don't process SACKs from the
1621 		 * peer because the MSS is too small, which
1622 		 * can subject us to an attack.
1623 */
1624 to.to_flags &= ~TOF_SACK;
1625 }
1626 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1627 if ((tp->t_flags & TF_SIGNATURE) != 0 &&
1628 (to.to_flags & TOF_SIGNATURE) == 0) {
1629 TCPSTAT_INC(tcps_sig_err_sigopt);
1630 /* XXX: should drop? */
1631 }
1632 #endif
1633 /*
1634 * If echoed timestamp is later than the current time,
1635 * fall back to non RFC1323 RTT calculation. Normalize
1636 * timestamp if syncookies were used when this connection
1637 * was established.
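	 *
	 * If, after a single retransmission, the echoed timestamp instead
	 * falls inside the bad-retransmit window (t_badrxtwin), the ACK
	 * must be for the original transmission, so the RTO is judged
	 * spurious and its effects are undone via CC_RTO_ERR.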
1638 */
1639 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1640 to.to_tsecr -= tp->ts_offset;
1641 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks())) {
1642 to.to_tsecr = 0;
1643 } else if (tp->t_rxtshift == 1 &&
1644 tp->t_flags & TF_PREVVALID &&
1645 tp->t_badrxtwin != 0 &&
1646 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin)) {
1647 cc_cong_signal(tp, th, CC_RTO_ERR);
1648 }
1649 }
1650 /*
1651 * Process options only when we get SYN/ACK back. The SYN case
1652 * for incoming connections is handled in tcp_syncache.
1653 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1654 * or <SYN,ACK>) segment itself is never scaled.
1655 * XXX this is traditional behavior, may need to be cleaned up.
1656 */
1657 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1658 /* Handle parallel SYN for ECN */
1659 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
1660 if ((to.to_flags & TOF_SCALE) &&
1661 (tp->t_flags & TF_REQ_SCALE) &&
1662 !(tp->t_flags & TF_NOOPT)) {
1663 tp->t_flags |= TF_RCVD_SCALE;
1664 tp->snd_scale = to.to_wscale;
1665 } else {
1666 tp->t_flags &= ~TF_REQ_SCALE;
1667 }
1668 /*
1669 * Initial send window. It will be updated with
1670 * the next incoming segment to the scaled value.
1671 */
1672 tp->snd_wnd = th->th_win;
1673 if ((to.to_flags & TOF_TS) &&
1674 (tp->t_flags & TF_REQ_TSTMP) &&
1675 !(tp->t_flags & TF_NOOPT)) {
1676 tp->t_flags |= TF_RCVD_TSTMP;
1677 tp->ts_recent = to.to_tsval;
1678 tp->ts_recent_age = tcp_ts_getticks();
1679 } else {
1680 tp->t_flags &= ~TF_REQ_TSTMP;
1681 }
1682 if (to.to_flags & TOF_MSS) {
1683 tcp_mss(tp, to.to_mss);
1684 }
1685 if ((tp->t_flags & TF_SACK_PERMIT) &&
1686 (!(to.to_flags & TOF_SACKPERM) ||
1687 (tp->t_flags & TF_NOOPT))) {
1688 tp->t_flags &= ~TF_SACK_PERMIT;
1689 }
1690 if (tp->t_flags & TF_FASTOPEN) {
1691 if ((to.to_flags & TOF_FASTOPEN) &&
1692 !(tp->t_flags & TF_NOOPT)) {
1693 uint16_t mss;
1694
1695 if (to.to_flags & TOF_MSS) {
1696 mss = to.to_mss;
1697 } else {
1698 if ((inp->inp_vflag & INP_IPV6) != 0) {
1699 mss = TCP6_MSS;
1700 } else {
1701 mss = TCP_MSS;
1702 }
1703 }
1704 tcp_fastopen_update_cache(tp, mss,
1705 to.to_tfo_len, to.to_tfo_cookie);
1706 } else {
1707 tcp_fastopen_disable_path(tp);
1708 }
1709 }
1710 }
1711
1712 /*
1713 * If timestamps were negotiated during SYN/ACK and a
1714 * segment without a timestamp is received, silently drop
1715 * the segment, unless it is a RST segment or missing timestamps are
1716 * tolerated.
1717 * See section 3.2 of RFC 7323.
1718 */
1719 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1720 if (((thflags & TH_RST) != 0) || V_tcp_tolerate_missing_ts) {
1721 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1722 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1723 "segment processed normally\n",
1724 s, __func__);
1725 free(s, M_TCPLOG);
1726 }
1727 } else {
1728 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1729 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1730 "segment silently dropped\n", s, __func__);
1731 free(s, M_TCPLOG);
1732 }
1733 goto drop;
1734 }
1735 }
1736 /*
1737 * If timestamps were not negotiated during SYN/ACK and a
1738 * segment with a timestamp is received, ignore the
1739 * timestamp and process the packet normally.
1740 * See section 3.2 of RFC 7323.
1741 */
1742 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1743 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1744 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1745 "segment processed normally\n", s, __func__);
1746 free(s, M_TCPLOG);
1747 }
1748 }
1749
1750 /*
1751 * Header prediction: check for the two common cases
1752 * of a uni-directional data xfer. If the packet has
1753 * no control flags, is in-sequence, the window didn't
1754 * change and we're not retransmitting, it's a
1755 * candidate. If the length is zero and the ack moved
1756 * forward, we're the sender side of the xfer. Just
1757 * free the data acked & wake any higher level process
1758 * that was blocked waiting for space. If the length
1759 * is non-zero and the ack didn't move, we're the
1760 * receiver side. If we're getting packets in-order
1761 * (the reassembly queue is empty), add the data to
1762 * the socket buffer and note that we need a delayed ack.
1763 * Make sure that the hidden state-flags are also off.
1764 * Since we check for TCPS_ESTABLISHED first, it can only
1765 * be TH_NEEDSYN.
1766 */
1767 if (tp->t_state == TCPS_ESTABLISHED &&
1768 th->th_seq == tp->rcv_nxt &&
1769 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1770 tp->snd_nxt == tp->snd_max &&
1771 tiwin && tiwin == tp->snd_wnd &&
1772 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1773 SEGQ_EMPTY(tp) &&
1774 ((to.to_flags & TOF_TS) == 0 ||
1775 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1776 /*
1777 * If last ACK falls within this segment's sequence numbers,
1778 * record the timestamp.
1779 * NOTE that the test is modified according to the latest
1780 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1781 */
1782 if ((to.to_flags & TOF_TS) != 0 &&
1783 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1784 tp->ts_recent_age = tcp_ts_getticks();
1785 tp->ts_recent = to.to_tsval;
1786 }
1787
1788 if (tlen == 0) {
1789 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1790 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1791 !IN_RECOVERY(tp->t_flags) &&
1792 (to.to_flags & TOF_SACK) == 0 &&
1793 TAILQ_EMPTY(&tp->snd_holes)) {
1794 /*
1795 * This is a pure ack for outstanding data.
1796 */
1797 TCPSTAT_INC(tcps_predack);
1798
1799 /*
1800 * "bad retransmit" recovery without timestamps.
1801 */
1802 if ((to.to_flags & TOF_TS) == 0 &&
1803 tp->t_rxtshift == 1 &&
1804 tp->t_flags & TF_PREVVALID &&
1805 tp->t_badrxtwin != 0 &&
1806 TSTMP_LT(ticks, tp->t_badrxtwin)) {
1807 cc_cong_signal(tp, th, CC_RTO_ERR);
1808 }
1809
1810 /*
1811 * Recalculate the transmit timer / rtt.
1812 *
1813 * Some boxes send broken timestamp replies
1814 * during the SYN+ACK phase, ignore
1815 * timestamps of 0 or we could calculate a
1816 * huge RTT and blow up the retransmit timer.
1817 */
1818 if ((to.to_flags & TOF_TS) != 0 &&
1819 to.to_tsecr) {
1820 uint32_t t;
1821
1822 t = tcp_ts_getticks() - to.to_tsecr;
1823 if (!tp->t_rttlow || tp->t_rttlow > t)
1824 tp->t_rttlow = t;
1825 tcp_xmit_timer(tp,
1826 TCP_TS_TO_TICKS(t) + 1);
1827 } else if (tp->t_rtttime &&
1828 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1829 if (!tp->t_rttlow ||
1830 tp->t_rttlow > ticks - tp->t_rtttime)
1831 tp->t_rttlow = ticks - tp->t_rtttime;
1832 tcp_xmit_timer(tp,
1833 ticks - tp->t_rtttime);
1834 }
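/*
 * Illustration, assuming the millisecond timestamp clock behind
 * tcp_ts_getticks(): if the local clock reads 105040 and the
 * echoed tsecr is 105000, t = 40 ms; TCP_TS_TO_TICKS converts
 * that to kernel ticks and the "+ 1" keeps a very fast reply
 * from yielding a zero-tick RTT sample.
 */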
1835 acked = BYTES_THIS_ACK(tp, th);
1836
1837 #ifdef TCP_HHOOK
1838 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1839 hhook_run_tcp_est_in(tp, th, &to);
1840 #endif
1841
1842 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
1843 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1844 sbdrop(&so->so_snd, acked);
1845 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1846 SEQ_LEQ(th->th_ack, tp->snd_recover))
1847 tp->snd_recover = th->th_ack - 1;
1848
1849 /*
1850 * Let the congestion control algorithm update
1851 * congestion control related information. This
1852 * typically means increasing the congestion
1853 * window.
1854 */
1855 cc_ack_received(tp, th, nsegs, CC_ACK);
1856
1857 tp->snd_una = th->th_ack;
1858 /*
1859 * Pull snd_wl2 up to prevent seq wrap relative
1860 * to th_ack.
1861 */
1862 tp->snd_wl2 = th->th_ack;
1863 tp->t_dupacks = 0;
1864 m_freem(m);
1865
1866 /*
1867 * If all outstanding data are acked, stop
1868 * retransmit timer, otherwise restart timer
1869 * using current (possibly backed-off) value.
1870 * If process is waiting for space,
1871 * wakeup/selwakeup/signal. If data
1872 * are ready to send, let tcp_output
1873 * decide between more output or persist.
1874 */
1875 TCP_PROBE3(debug__input, tp, th, m);
1876 /*
1877 * Clear t_acktime if remote side has ACKd
1878 * all data in the socket buffer.
1879 * Otherwise, update t_acktime if we received
1880 * a sufficiently large ACK.
1881 */
1882 if (sbavail(&so->so_snd) == 0)
1883 tp->t_acktime = 0;
1884 else if (acked > 1)
1885 tp->t_acktime = ticks;
1886 if (tp->snd_una == tp->snd_max)
1887 tcp_timer_activate(tp, TT_REXMT, 0);
1888 else if (!tcp_timer_active(tp, TT_PERSIST))
1889 tcp_timer_activate(tp, TT_REXMT,
1890 TP_RXTCUR(tp));
1891 sowwakeup(so);
1892 /*
1893 * Only call tcp_output when there
1894 * is new data available to be sent
1895 * or we need to send an ACK.
1896 */
1897 if ((tp->t_flags & TF_ACKNOW) ||
1898 (sbavail(&so->so_snd) >=
1899 SEQ_SUB(tp->snd_max, tp->snd_una))) {
1900 (void) tcp_output(tp);
1901 }
1902 goto check_delack;
1903 }
1904 } else if (th->th_ack == tp->snd_una &&
1905 tlen <= sbspace(&so->so_rcv)) {
1906 int newsize = 0; /* automatic sockbuf scaling */
1907
1908 /*
1909 * This is a pure, in-sequence data packet with
1910 * nothing on the reassembly queue and we have enough
1911 * buffer space to take it.
1912 */
1913 /* Clean receiver SACK report if present */
1914 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1915 tcp_clean_sackreport(tp);
1916 TCPSTAT_INC(tcps_preddat);
1917 tp->rcv_nxt += tlen;
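/*
 * Record the time the first payload byte was received. A value
 * of 0 means "unset", so if ticks happens to be 0 the sentinel
 * value 1 is stored instead.
 */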
1918 if (tlen &&
1919 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
1920 (tp->t_fbyte_in == 0)) {
1921 tp->t_fbyte_in = ticks;
1922 if (tp->t_fbyte_in == 0)
1923 tp->t_fbyte_in = 1;
1924 if (tp->t_fbyte_out && tp->t_fbyte_in)
1925 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
1926 }
1927 /*
1928 * Pull snd_wl1 up to prevent seq wrap relative to
1929 * th_seq.
1930 */
1931 tp->snd_wl1 = th->th_seq;
1932 /*
1933 * Pull rcv_up up to prevent seq wrap relative to
1934 * rcv_nxt.
1935 */
1936 tp->rcv_up = tp->rcv_nxt;
1937 TCPSTAT_ADD(tcps_rcvpack, nsegs);
1938 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1939 TCP_PROBE3(debug__input, tp, th, m);
1940
1941 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1942
1943 /* Add data to socket buffer. */
1944 SOCKBUF_LOCK(&so->so_rcv);
1945 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1946 m_freem(m);
1947 } else {
1948 /*
1949 * Set new socket buffer size.
1950 * Give up when limit is reached.
1951 */
1952 if (newsize)
1953 if (!sbreserve_locked(so, SO_RCV,
1954 newsize, NULL))
1955 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1956 m_adj(m, drop_hdrlen); /* delayed header drop */
1957 sbappendstream_locked(&so->so_rcv, m, 0);
1958 }
1959 /* NB: sorwakeup_locked() does an implicit unlock. */
1960 sorwakeup_locked(so);
1961 if (DELAY_ACK(tp, tlen)) {
1962 tp->t_flags |= TF_DELACK;
1963 } else {
1964 tp->t_flags |= TF_ACKNOW;
1965 (void) tcp_output(tp);
1966 }
1967 goto check_delack;
1968 }
1969 }
1970
1971 /*
1972 * Calculate amount of space in receive window,
1973 * and then do TCP input processing.
1974 * Receive window is amount of space in rcv queue,
1975 * but not less than advertised window.
1976 */
1977 win = sbspace(&so->so_rcv);
1978 if (win < 0)
1979 win = 0;
1980 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
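/*
 * Example: if only 8 kB remain in the receive buffer
 * (win = 8192) but we previously advertised up to
 * rcv_adv = rcv_nxt + 12000, rcv_wnd stays at 12000 so the
 * advertised right edge never moves left; RFC 793 strongly
 * discourages shrinking the window.
 */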
1981
1982 switch (tp->t_state) {
1983 /*
1984 * If the state is SYN_RECEIVED:
1985 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1986 */
1987 case TCPS_SYN_RECEIVED:
1988 if (thflags & TH_RST) {
1989 /* Handle RST segments later. */
1990 break;
1991 }
1992 if ((thflags & TH_ACK) &&
1993 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1994 SEQ_GT(th->th_ack, tp->snd_max))) {
1995 rstreason = BANDLIM_RST_OPENPORT;
1996 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1997 goto dropwithreset;
1998 }
1999 if (tp->t_flags & TF_FASTOPEN) {
2000 /*
2001 * When a TFO connection is in SYN_RECEIVED, the
2002 * only valid packets are the initial SYN, a
2003 * retransmit/copy of the initial SYN (possibly with
2004 * a subset of the original data), a valid ACK, a
2005 * FIN, or a RST.
2006 */
2007 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
2008 rstreason = BANDLIM_RST_OPENPORT;
2009 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2010 goto dropwithreset;
2011 } else if (thflags & TH_SYN) {
2012 /* non-initial SYN is ignored */
2013 if ((tcp_timer_active(tp, TT_DELACK) ||
2014 tcp_timer_active(tp, TT_REXMT)))
2015 goto drop;
2016 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
2017 goto drop;
2018 }
2019 }
2020 break;
2021
2022 /*
2023 * If the state is SYN_SENT:
2024 * if seg contains a RST with valid ACK (SEQ.ACK has already
2025 * been verified), then drop the connection.
2026 * if seg contains a RST without an ACK, drop the seg.
2027 * if seg does not contain SYN, then drop the seg.
2028 * Otherwise this is an acceptable SYN segment
2029 * initialize tp->rcv_nxt and tp->irs
2030 * if seg contains ack then advance tp->snd_una
2031 * if seg contains an ECE and ECN support is enabled, the stream
2032 * is ECN capable.
2033 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2034 * arrange for segment to be acked (eventually)
2035 * continue processing rest of data/controls, beginning with URG
2036 */
2037 case TCPS_SYN_SENT:
2038 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2039 TCP_PROBE5(connect__refused, NULL, tp,
2040 m, tp, th);
2041 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2042 tp = tcp_drop(tp, ECONNREFUSED);
2043 }
2044 if (thflags & TH_RST)
2045 goto drop;
2046 if (!(thflags & TH_SYN))
2047 goto drop;
2048
2049 tp->irs = th->th_seq;
2050 tcp_rcvseqinit(tp);
2051 if (thflags & TH_ACK) {
2052 int tfo_partial_ack = 0;
2053
2054 TCPSTAT_INC(tcps_connects);
2055 soisconnected(so);
2056 #ifdef MAC
2057 mac_socketpeer_set_from_mbuf(m, so);
2058 #endif
2059 /* Do window scaling on this connection? */
2060 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2061 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2062 tp->rcv_scale = tp->request_r_scale;
2063 }
2064 tp->rcv_adv += min(tp->rcv_wnd,
2065 TCP_MAXWIN << tp->rcv_scale);
2066 tp->snd_una++; /* SYN is acked */
2067 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2068 tp->snd_nxt = tp->snd_una;
2069 /*
2070 * If not all the data that was sent in the TFO SYN
2071 * has been acked, resend the remainder right away.
2072 */
2073 if ((tp->t_flags & TF_FASTOPEN) &&
2074 (tp->snd_una != tp->snd_max)) {
2075 tp->snd_nxt = th->th_ack;
2076 tfo_partial_ack = 1;
2077 }
2078 /*
2079 * If there's data, delay ACK; if there's also a FIN,

2080 * ACKNOW will be turned on later.
2081 */
2082 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack)
2083 tcp_timer_activate(tp, TT_DELACK,
2084 tcp_delacktime);
2085 else
2086 tp->t_flags |= TF_ACKNOW;
2087
2088 tcp_ecn_input_syn_sent(tp, thflags, iptos);
2089
2090 /*
2091 * Received <SYN,ACK> in SYN_SENT[*] state.
2092 * Transitions:
2093 * SYN_SENT --> ESTABLISHED
2094 * SYN_SENT* --> FIN_WAIT_1
2095 */
2096 tp->t_starttime = ticks;
2097 if (tp->t_flags & TF_NEEDFIN) {
2098 tp->t_acktime = ticks;
2099 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2100 tp->t_flags &= ~TF_NEEDFIN;
2101 thflags &= ~TH_SYN;
2102 } else {
2103 tcp_state_change(tp, TCPS_ESTABLISHED);
2104 TCP_PROBE5(connect__established, NULL, tp,
2105 m, tp, th);
2106 cc_conn_init(tp);
2107 tcp_timer_activate(tp, TT_KEEP,
2108 TP_KEEPIDLE(tp));
2109 }
2110 } else {
2111 /*
2112 * Received initial SYN in SYN-SENT[*] state =>
2113 * simultaneous open.
2114 * If it succeeds, connection is half-synchronized.
2115 * Otherwise, do 3-way handshake:
2116 * SYN-SENT -> SYN-RECEIVED
2117 * SYN-SENT* -> SYN-RECEIVED*
2118 */
2119 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
2120 tcp_timer_activate(tp, TT_REXMT, 0);
2121 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2122 }
2123
2124 /*
2125 * Advance th->th_seq to correspond to first data byte.
2126 * If data, trim to stay within window,
2127 * dropping FIN if necessary.
2128 */
2129 th->th_seq++;
2130 if (tlen > tp->rcv_wnd) {
2131 todrop = tlen - tp->rcv_wnd;
2132 m_adj(m, -todrop);
2133 tlen = tp->rcv_wnd;
2134 thflags &= ~TH_FIN;
2135 TCPSTAT_INC(tcps_rcvpackafterwin);
2136 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2137 }
2138 tp->snd_wl1 = th->th_seq - 1;
2139 tp->rcv_up = th->th_seq;
2140 /*
2141 * Client side of transaction: already sent SYN and data.
2142 * If the remote host used T/TCP to validate the SYN,
2143 * our data will be ACK'd; if so, enter normal data segment
2144 * processing in the middle of step 5, ack processing.
2145 * Otherwise, goto step 6.
2146 */
2147 if (thflags & TH_ACK)
2148 goto process_ACK;
2149
2150 goto step6;
2151 }
2152
2153 /*
2154 * States other than LISTEN or SYN_SENT.
2155 * First check the RST flag and sequence number since reset segments
2156 * are exempt from the timestamp and connection count tests. This
2157 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2158 * below which allowed reset segments in half the sequence space
2159 * to fall through and be processed (which gives forged reset
2160 * segments with a random sequence number a 50 percent chance of
2161 * killing a connection).
2162 * Then check timestamp, if present.
2163 * Then check the connection count, if present.
2164 * Then check that at least some bytes of segment are within
2165 * receive window. If segment begins before rcv_nxt,
2166 * drop leading data (and SYN); if nothing left, just ack.
2167 */
2168 if (thflags & TH_RST) {
2169 /*
2170 * RFC5961 Section 3.2
2171 *
2172 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2173 * - If RST is in window, we send challenge ACK.
2174 *
2175 * Note: to take into account delayed ACKs, we should
2176 * test against last_ack_sent instead of rcv_nxt.
2177 * Note 2: we handle special case of closed window, not
2178 * covered by the RFC.
2179 */
2180 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2181 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2182 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2183 KASSERT(tp->t_state != TCPS_SYN_SENT,
2184 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2185 __func__, th, tp));
2186
2187 if (V_tcp_insecure_rst ||
2188 tp->last_ack_sent == th->th_seq) {
2189 TCPSTAT_INC(tcps_drops);
2190 /* Drop the connection. */
2191 switch (tp->t_state) {
2192 case TCPS_SYN_RECEIVED:
2193 so->so_error = ECONNREFUSED;
2194 goto close;
2195 case TCPS_ESTABLISHED:
2196 case TCPS_FIN_WAIT_1:
2197 case TCPS_FIN_WAIT_2:
2198 case TCPS_CLOSE_WAIT:
2199 case TCPS_CLOSING:
2200 case TCPS_LAST_ACK:
2201 so->so_error = ECONNRESET;
2202 close:
2203 /* FALLTHROUGH */
2204 default:
2205 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_RST);
2206 tp = tcp_close(tp);
2207 }
2208 } else {
2209 TCPSTAT_INC(tcps_badrst);
2210 /* Send challenge ACK. */
2211 tcp_respond(tp, mtod(m, void *), th, m,
2212 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2213 tp->last_ack_sent = tp->rcv_nxt;
2214 m = NULL;
2215 }
2216 }
2217 goto drop;
2218 }
2219
2220 /*
2221 * RFC5961 Section 4.2
2222 * Send challenge ACK for any SYN in synchronized state.
2223 */
2224 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2225 tp->t_state != TCPS_SYN_RECEIVED) {
2226 TCPSTAT_INC(tcps_badsyn);
2227 if (V_tcp_insecure_syn &&
2228 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2229 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2230 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2231 tp = tcp_drop(tp, ECONNRESET);
2232 rstreason = BANDLIM_UNLIMITED;
2233 } else {
2234 tcp_ecn_input_syn_sent(tp, thflags, iptos);
2235 /* Send challenge ACK. */
2236 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2237 tp->snd_nxt, TH_ACK);
2238 tp->last_ack_sent = tp->rcv_nxt;
2239 m = NULL;
2240 }
2241 goto drop;
2242 }
2243
2244 /*
2245 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2246 * and it's less than ts_recent, drop it.
2247 */
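/*
 * TCP_PAWS_IDLE is roughly 24 days: with the fastest timestamp
 * clock RFC 7323 permits (1 tick per millisecond), the 31-bit
 * timestamp space wraps in about 24.8 days, so an older
 * ts_recent can no longer be compared reliably and is
 * invalidated below.
 */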
2248 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2249 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2250 /* Check to see if ts_recent is over 24 days old. */
2251 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2252 /*
2253 * Invalidate ts_recent. If this segment updates
2254 * ts_recent, the age will be reset later and ts_recent
2255 * will get a valid value. If it does not, setting
2256 * ts_recent to zero will at least satisfy the
2257 * requirement that zero be placed in the timestamp
2258 * echo reply when ts_recent isn't valid. The
2259 * age isn't reset until we get a valid ts_recent
2260 * because we don't want out-of-order segments to be
2261 * dropped when ts_recent is old.
2262 */
2263 tp->ts_recent = 0;
2264 } else {
2265 TCPSTAT_INC(tcps_rcvduppack);
2266 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2267 TCPSTAT_INC(tcps_pawsdrop);
2268 if (tlen)
2269 goto dropafterack;
2270 goto drop;
2271 }
2272 }
2273
2274 /*
2275 * In the SYN-RECEIVED state, validate that the packet belongs to
2276 * this connection before trimming the data to fit the receive
2277 * window. Check the sequence number versus IRS since we know
2278 * the sequence numbers haven't wrapped. This is a partial fix
2279 * for the "LAND" DoS attack.
2280 */
2281 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2282 rstreason = BANDLIM_RST_OPENPORT;
2283 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2284 goto dropwithreset;
2285 }
2286
2287 todrop = tp->rcv_nxt - th->th_seq;
2288 if (todrop > 0) {
2289 if (thflags & TH_SYN) {
2290 thflags &= ~TH_SYN;
2291 th->th_seq++;
2292 if (th->th_urp > 1)
2293 th->th_urp--;
2294 else
2295 thflags &= ~TH_URG;
2296 todrop--;
2297 }
2298 /*
2299 * Following if statement from Stevens, vol. 2, p. 960.
2300 */
2301 if (todrop > tlen
2302 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2303 /*
2304 * Any valid FIN must be to the left of the window.
2305 * At this point the FIN must be a duplicate or out
2306 * of sequence; drop it.
2307 */
2308 thflags &= ~TH_FIN;
2309
2310 /*
2311 * Send an ACK to resynchronize and drop any data.
2312 * But keep on processing for RST or ACK.
2313 */
2314 tp->t_flags |= TF_ACKNOW;
2315 todrop = tlen;
2316 TCPSTAT_INC(tcps_rcvduppack);
2317 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2318 } else {
2319 TCPSTAT_INC(tcps_rcvpartduppack);
2320 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2321 }
2322 /*
2323 * DSACK - add SACK block for dropped range
2324 */
2325 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
2326 tcp_update_sack_list(tp, th->th_seq,
2327 th->th_seq + todrop);
2328 /*
2329 * ACK now, as the next in-sequence segment
2330 * will clear the DSACK block again
2331 */
2332 tp->t_flags |= TF_ACKNOW;
2333 }
2334 drop_hdrlen += todrop; /* drop from the top afterwards */
2335 th->th_seq += todrop;
2336 tlen -= todrop;
2337 if (th->th_urp > todrop)
2338 th->th_urp -= todrop;
2339 else {
2340 thflags &= ~TH_URG;
2341 th->th_urp = 0;
2342 }
2343 }
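/*
 * Worked example of the trim above: with rcv_nxt = 2000, a
 * segment with th_seq = 1990 and tlen = 30 gives todrop = 10;
 * the 10 duplicate bytes are skipped via drop_hdrlen, th_seq
 * advances to 2000, tlen shrinks to 20 and, if SACK was
 * negotiated, the duplicate range [1990, 2000) was reported
 * above as a DSACK block.
 */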
2344
2345 /*
2346 * If new data are received on a connection after the
2347 * user processes are gone, then RST the other end if
2348 * no FIN has been processed.
2349 */
2350 if ((tp->t_flags & TF_CLOSED) && tlen > 0 &&
2351 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2352 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2353 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2354 "after socket was closed, "
2355 "sending RST and removing tcpcb\n",
2356 s, __func__, tcpstates[tp->t_state], tlen);
2357 free(s, M_TCPLOG);
2358 }
2359 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
2360 /* tcp_close() will kill the inp, so pre-log the reset. */
2361 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
2362 tp = tcp_close(tp);
2363 TCPSTAT_INC(tcps_rcvafterclose);
2364 rstreason = BANDLIM_UNLIMITED;
2365 goto dropwithreset;
2366 }
2367
2368 /*
2369 * If segment ends after window, drop trailing data
2370 * (and PUSH and FIN); if nothing left, just ACK.
2371 */
2372 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2373 if (todrop > 0) {
2374 TCPSTAT_INC(tcps_rcvpackafterwin);
2375 if (todrop >= tlen) {
2376 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2377 /*
2378 * If window is closed can only take segments at
2379 * window edge, and have to drop data and PUSH from
2380 * incoming segments. Continue processing, but
2381 * remember to ack. Otherwise, drop segment
2382 * and ack.
2383 */
2384 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2385 tp->t_flags |= TF_ACKNOW;
2386 TCPSTAT_INC(tcps_rcvwinprobe);
2387 } else
2388 goto dropafterack;
2389 } else
2390 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2391 m_adj(m, -todrop);
2392 tlen -= todrop;
2393 thflags &= ~(TH_PUSH|TH_FIN);
2394 }
2395
2396 /*
2397 * If last ACK falls within this segment's sequence numbers,
2398 * record its timestamp.
2399 * NOTE:
2400 * 1) That the test incorporates suggestions from the latest
2401 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2402 * 2) That updating only on newer timestamps interferes with
2403 * our earlier PAWS tests, so this check should be solely
2404 * predicated on the sequence space of this segment.
2405 * 3) That we modify the segment boundary check to be
2406 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2407 * instead of RFC1323's
2408 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2409 * This modified check allows us to overcome RFC1323's
2410 * limitations as described in Stevens TCP/IP Illustrated
2411 * Vol. 2 p.869. In such cases, we can still calculate the
2412 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2413 */
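/*
 * Example where the "<=" matters: with last_ack_sent = 1000, a
 * segment with th_seq = 990 and tlen = 10 ends exactly at
 * Last.ACK.Sent, so the modified test records its tsval while
 * RFC 1323's strict "<" would have skipped it.
 */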
2414 if ((to.to_flags & TOF_TS) != 0 &&
2415 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2416 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2417 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2418 tp->ts_recent_age = tcp_ts_getticks();
2419 tp->ts_recent = to.to_tsval;
2420 }
2421
2422 /*
2423 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2424 * flag is on (half-synchronized state), then queue data for
2425 * later processing; else drop segment and return.
2426 */
2427 if ((thflags & TH_ACK) == 0) {
2428 if (tp->t_state == TCPS_SYN_RECEIVED ||
2429 (tp->t_flags & TF_NEEDSYN)) {
2430 if (tp->t_state == TCPS_SYN_RECEIVED &&
2431 (tp->t_flags & TF_FASTOPEN)) {
2432 tp->snd_wnd = tiwin;
2433 cc_conn_init(tp);
2434 }
2435 goto step6;
2436 } else if (tp->t_flags & TF_ACKNOW)
2437 goto dropafterack;
2438 else
2439 goto drop;
2440 }
2441
2442 /*
2443 * Ack processing.
2444 */
2445 switch (tp->t_state) {
2446 /*
2447 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2448 * ESTABLISHED state and continue processing.
2449 * The ACK was checked above.
2450 */
2451 case TCPS_SYN_RECEIVED:
2452
2453 TCPSTAT_INC(tcps_connects);
2454 if (tp->t_flags & TF_SONOTCONN) {
2455 /*
2456 * Usually SYN_RECEIVED had been created from a LISTEN,
2457 * and solisten_enqueue() has already marked the socket
2458 * layer as connected. If it didn't, which can happen
2459 * only with an accept_filter(9), then the tp is marked
2460 * with TF_SONOTCONN. The other reason for this mark
2461 * to be set is a simultaneous open, a SYN_RECEIVED
2462 * that had been created from SYN_SENT.
2463 */
2464 tp->t_flags &= ~TF_SONOTCONN;
2465 soisconnected(so);
2466 }
2467 /* Do window scaling? */
2468 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2469 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2470 tp->rcv_scale = tp->request_r_scale;
2471 }
2472 tp->snd_wnd = tiwin;
2473 /*
2474 * Make transitions:
2475 * SYN-RECEIVED -> ESTABLISHED
2476 * SYN-RECEIVED* -> FIN-WAIT-1
2477 */
2478 tp->t_starttime = ticks;
2479 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) {
2480 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2481 tp->t_tfo_pending = NULL;
2482 }
2483 if (tp->t_flags & TF_NEEDFIN) {
2484 tp->t_acktime = ticks;
2485 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2486 tp->t_flags &= ~TF_NEEDFIN;
2487 } else {
2488 tcp_state_change(tp, TCPS_ESTABLISHED);
2489 TCP_PROBE5(accept__established, NULL, tp,
2490 m, tp, th);
2491 /*
2492 * TFO connections call cc_conn_init() during SYN
2493 * processing. Calling it again here for such
2494 * connections is not harmless as it would undo the
2495 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2496 * is retransmitted.
2497 */
2498 if (!(tp->t_flags & TF_FASTOPEN))
2499 cc_conn_init(tp);
2500 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2501 }
2502 /*
2503 * Account for the ACK of our SYN prior to
2504 * regular ACK processing below, except for
2505 * simultaneous SYN, which is handled later.
2506 */
2507 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
2508 incforsyn = 1;
2509 /*
2510 * If segment contains data or ACK, will call tcp_reass()
2511 * later; if not, do so now to pass queued data to user.
2512 */
2513 if (tlen == 0 && (thflags & TH_FIN) == 0) {
2514 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
2515 (struct mbuf *)0);
2516 tcp_handle_wakeup(tp);
2517 }
2518 tp->snd_wl1 = th->th_seq - 1;
2519 /* FALLTHROUGH */
2520
2521 /*
2522 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2523 * ACKs. If the ack is in the range
2524 * tp->snd_una < th->th_ack <= tp->snd_max
2525 * then advance tp->snd_una to th->th_ack and drop
2526 * data from the retransmission queue. If this ACK reflects
2527 * more up to date window information we update our window information.
2528 */
2529 case TCPS_ESTABLISHED:
2530 case TCPS_FIN_WAIT_1:
2531 case TCPS_FIN_WAIT_2:
2532 case TCPS_CLOSE_WAIT:
2533 case TCPS_CLOSING:
2534 case TCPS_LAST_ACK:
2535 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2536 TCPSTAT_INC(tcps_rcvacktoomuch);
2537 goto dropafterack;
2538 }
2539 if (tcp_is_sack_recovery(tp, &to)) {
2540 sack_changed = tcp_sack_doack(tp, &to, th->th_ack);
2541 if ((sack_changed != SACK_NOCHANGE) &&
2542 (tp->t_flags & TF_LRD)) {
2543 tcp_sack_lost_retransmission(tp, th);
2544 }
2545 } else
2546 /*
2547 * Reset the value so that previous (valid) value
2548 * from the last ack with SACK doesn't get used.
2549 */
2550 tp->sackhint.sacked_bytes = 0;
2551
2552 #ifdef TCP_HHOOK
2553 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2554 hhook_run_tcp_est_in(tp, th, &to);
2555 #endif
2556
2557 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2558 maxseg = tcp_maxseg(tp);
2559 if (tlen == 0 &&
2560 (tiwin == tp->snd_wnd ||
2561 (tp->t_flags & TF_SACK_PERMIT))) {
2562 /*
2563 * If this is the first time we've seen a
2564 * FIN from the remote, this is not a
2565 * duplicate and it needs to be processed
2566 * normally. This happens during a
2567 * simultaneous close.
2568 */
2569 if ((thflags & TH_FIN) &&
2570 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2571 tp->t_dupacks = 0;
2572 break;
2573 }
2574 TCPSTAT_INC(tcps_rcvdupack);
2575 /*
2576 * If we have outstanding data (other than
2577 * a window probe), this is a completely
2578 * duplicate ack (ie, window info didn't
2579 * change and FIN isn't set),
2580 * the ack is the biggest we've
2581 * seen and we've seen exactly our rexmt
2582 * threshold of them, assume a packet
2583 * has been dropped and retransmit it.
2584 * Kludge snd_nxt & the congestion
2585 * window so we send only this one
2586 * packet.
2587 *
2588 * We know we're losing at the current
2589 * window size so do congestion avoidance
2590 * (set ssthresh to half the current window
2591 * and pull our congestion window back to
2592 * the new ssthresh).
2593 *
2594 * Dup acks mean that packets have left the
2595 * network (they're now cached at the receiver)
2596 * so bump cwnd by the amount in the receiver
2597 * to keep a constant cwnd packets in the
2598 * network.
2599 *
2600 * When using TCP ECN, notify the peer that
2601 * we reduced the cwnd.
2602 */
2603 /*
2604 * The following two kinds of ACKs should not affect
2605 * dupack counting:
2606 * 1) Old acks
2607 * 2) Acks with SACK but without any new SACK
2608 * information in them. These could result from
2609 * any anomaly in the network like a switch
2610 * duplicating packets or a possible DoS attack.
2611 */
2612 if (th->th_ack != tp->snd_una ||
2613 (tcp_is_sack_recovery(tp, &to) &&
2614 (sack_changed == SACK_NOCHANGE))) {
2615 break;
2616 } else if (!tcp_timer_active(tp, TT_REXMT)) {
2617 tp->t_dupacks = 0;
2618 } else if (++tp->t_dupacks > tcprexmtthresh ||
2619 IN_FASTRECOVERY(tp->t_flags)) {
2620 cc_ack_received(tp, th, nsegs,
2621 CC_DUPACK);
2622 if (V_tcp_do_prr &&
2623 IN_FASTRECOVERY(tp->t_flags) &&
2624 (tp->t_flags & TF_SACK_PERMIT)) {
2625 tcp_do_prr_ack(tp, th, &to,
2626 sack_changed, &maxseg);
2627 } else if (tcp_is_sack_recovery(tp, &to) &&
2628 IN_FASTRECOVERY(tp->t_flags)) {
2629 int awnd;
2630
2631 /*
2632 * Compute the amount of data in flight first.
2633 * We can inject new data into the pipe iff
2634 * we have less than 1/2 the original window's
2635 * worth of data in flight.
2636 */
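/*
 * E.g., with snd_ssthresh of 32 kB and roughly 20 kB estimated
 * in flight, each further dupack below grows cwnd by one
 * maxseg, clamped at ssthresh, letting new data trickle out
 * while recovery proceeds.
 */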
2637 if (V_tcp_do_newsack) {
2638 awnd = tcp_compute_pipe(tp);
2639 } else {
2640 awnd = (tp->snd_nxt - tp->snd_fack) +
2641 tp->sackhint.sack_bytes_rexmit;
2642 }
2643 if (awnd < tp->snd_ssthresh) {
2644 tp->snd_cwnd += maxseg;
2645 if (tp->snd_cwnd > tp->snd_ssthresh)
2646 tp->snd_cwnd = tp->snd_ssthresh;
2647 }
2648 } else {
2649 tp->snd_cwnd += maxseg;
2650 }
2651 (void) tcp_output(tp);
2652 goto drop;
2653 } else if (tp->t_dupacks == tcprexmtthresh ||
2654 (tp->t_flags & TF_SACK_PERMIT &&
2655 V_tcp_do_newsack &&
2656 tp->sackhint.sacked_bytes >
2657 (tcprexmtthresh - 1) * maxseg)) {
2658 enter_recovery:
2659 /*
2660 * Above is the RFC6675 trigger condition of
2661 * more than (dupthresh-1)*maxseg sacked data.
2662 * If the count of holes in the
2663 * scoreboard is >= dupthresh, we could
2664 * also enter loss recovery, but don't
2665 * have that value readily available.
2666 */
2667 tp->t_dupacks = tcprexmtthresh;
2668 tcp_seq onxt = tp->snd_nxt;
2669
2670 /*
2671 * If we're doing sack, or prr, check
2672 * to see if we're already in sack
2673 * recovery. If we're not doing sack,
2674 * check to see if we're in newreno
2675 * recovery.
2676 */
2677 if (V_tcp_do_prr ||
2678 (tp->t_flags & TF_SACK_PERMIT)) {
2679 if (IN_FASTRECOVERY(tp->t_flags)) {
2680 tp->t_dupacks = 0;
2681 break;
2682 }
2683 } else {
2684 if (SEQ_LEQ(th->th_ack,
2685 tp->snd_recover)) {
2686 tp->t_dupacks = 0;
2687 break;
2688 }
2689 }
2690 /* Congestion signal before ack. */
2691 cc_cong_signal(tp, th, CC_NDUPACK);
2692 cc_ack_received(tp, th, nsegs,
2693 CC_DUPACK);
2694 tcp_timer_activate(tp, TT_REXMT, 0);
2695 tp->t_rtttime = 0;
2696 if (V_tcp_do_prr) {
2697 /*
2698 * snd_ssthresh is already updated by
2699 * cc_cong_signal.
2700 */
2701 if (tcp_is_sack_recovery(tp, &to)) {
2702 /*
2703 * Exclude Limited Transmit
2704 * segments here
2705 */
2706 tp->sackhint.prr_delivered =
2707 maxseg;
2708 } else {
2709 tp->sackhint.prr_delivered =
2710 imin(tp->snd_max - tp->snd_una,
2711 imin(INT_MAX / 65536,
2712 tp->t_dupacks) * maxseg);
2713 }
2714 tp->sackhint.recover_fs = max(1,
2715 tp->snd_nxt - tp->snd_una);
2716 }
2717 if (tcp_is_sack_recovery(tp, &to)) {
2718 TCPSTAT_INC(tcps_sack_recovery_episode);
2719 tp->snd_recover = tp->snd_nxt;
2720 tp->snd_cwnd = maxseg;
2721 (void) tcp_output(tp);
2722 if (SEQ_GT(th->th_ack, tp->snd_una)) {
2723 goto resume_partialack;
2724 }
2725 goto drop;
2726 }
2727 tp->snd_nxt = th->th_ack;
2728 tp->snd_cwnd = maxseg;
2729 (void) tcp_output(tp);
2730 KASSERT(tp->snd_limited <= 2,
2731 ("%s: tp->snd_limited too big",
2732 __func__));
2733 tp->snd_cwnd = tp->snd_ssthresh +
2734 maxseg *
2735 (tp->t_dupacks - tp->snd_limited);
2736 if (SEQ_GT(onxt, tp->snd_nxt))
2737 tp->snd_nxt = onxt;
2738 goto drop;
2739 } else if (V_tcp_do_rfc3042) {
2740 /*
2741 * Process first and second duplicate
2742 * ACKs. Each indicates a segment
2743 * leaving the network, creating room
2744 * for more. Make sure we can send a
2745 * packet on reception of each duplicate
2746 * ACK by increasing snd_cwnd by one
2747 * segment. Restore the original
2748 * snd_cwnd after packet transmission.
2749 */
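/*
 * Sketch with maxseg = 1460: on the second dupack, with
 * snd_nxt - snd_una = 8760 and snd_limited = 0, cwnd is set to
 * 8760 + 2 * 1460 = 11680 just long enough to emit up to two
 * previously unsent segments, then restored from oldcwnd below.
 */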
2750 cc_ack_received(tp, th, nsegs, CC_DUPACK);
2751 uint32_t oldcwnd = tp->snd_cwnd;
2752 tcp_seq oldsndmax = tp->snd_max;
2753 u_int sent;
2754 int avail;
2755
2756 KASSERT(tp->t_dupacks == 1 ||
2757 tp->t_dupacks == 2,
2758 ("%s: dupacks not 1 or 2",
2759 __func__));
2760 if (tp->t_dupacks == 1)
2761 tp->snd_limited = 0;
2762 tp->snd_cwnd =
2763 (tp->snd_nxt - tp->snd_una) +
2764 (tp->t_dupacks - tp->snd_limited) *
2765 maxseg;
2766 /*
2767 * Only call tcp_output when there
2768 * is new data available to be sent
2769 * or we need to send an ACK.
2770 */
2771 SOCKBUF_LOCK(&so->so_snd);
2772 avail = sbavail(&so->so_snd);
2773 SOCKBUF_UNLOCK(&so->so_snd);
2774 if (tp->t_flags & TF_ACKNOW ||
2775 (avail >=
2776 SEQ_SUB(tp->snd_nxt, tp->snd_una))) {
2777 (void) tcp_output(tp);
2778 }
2779 sent = SEQ_SUB(tp->snd_max, oldsndmax);
2780 if (sent > maxseg) {
2781 KASSERT((tp->t_dupacks == 2 &&
2782 tp->snd_limited == 0) ||
2783 (sent == maxseg + 1 &&
2784 tp->t_flags & TF_SENTFIN),
2785 ("%s: sent too much",
2786 __func__));
2787 tp->snd_limited = 2;
2788 } else if (sent > 0) {
2789 ++tp->snd_limited;
2790 }
2791 tp->snd_cwnd = oldcwnd;
2792 goto drop;
2793 }
2794 }
2795 break;
2796 } else {
2797 /*
2798 * This ack is advancing the left edge, reset the
2799 * counter.
2800 */
2801 tp->t_dupacks = 0;
2802 /*
2803 * If this ack also has new SACK info, increment the
2804 * counter as per rfc6675. The variable
2805 * sack_changed tracks all changes to the SACK
2806 * scoreboard, including partial ACKs that arrive without
2807 * SACK options and clear the scoreboard
2808 * from the left side. Such partial ACKs should not be
2809 * counted as dupacks here.
2810 */
2811 if (tcp_is_sack_recovery(tp, &to) &&
2812 (sack_changed != SACK_NOCHANGE)) {
2813 tp->t_dupacks++;
2814 /* limit overhead by setting maxseg last */
2815 if (!IN_FASTRECOVERY(tp->t_flags) &&
2816 (tp->sackhint.sacked_bytes >
2817 ((tcprexmtthresh - 1) *
2818 (maxseg = tcp_maxseg(tp))))) {
2819 goto enter_recovery;
2820 }
2821 }
2822 }
2823
2824 resume_partialack:
2825 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2826 ("%s: th_ack <= snd_una", __func__));
2827
2828 /*
2829 * If the congestion window was inflated to account
2830 * for the other side's cached packets, retract it.
2831 */
2832 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2833 if (IN_FASTRECOVERY(tp->t_flags)) {
2834 if (tp->t_flags & TF_SACK_PERMIT) {
2835 if (V_tcp_do_prr &&
2836 (to.to_flags & TOF_SACK)) {
2837 tcp_timer_activate(tp,
2838 TT_REXMT, 0);
2839 tp->t_rtttime = 0;
2840 tcp_do_prr_ack(tp, th, &to,
2841 sack_changed, &maxseg);
2842 tp->t_flags |= TF_ACKNOW;
2843 (void) tcp_output(tp);
2844 } else {
2845 tcp_sack_partialack(tp, th,
2846 &maxseg);
2847 }
2848 } else {
2849 tcp_newreno_partial_ack(tp, th);
2850 }
2851 } else if (IN_CONGRECOVERY(tp->t_flags) &&
2852 (V_tcp_do_prr)) {
2853 tp->sackhint.delivered_data =
2854 BYTES_THIS_ACK(tp, th);
2855 tp->snd_fack = th->th_ack;
2856 /*
2857 * During ECN cwnd reduction
2858 * always use PRR-SSRB
2859 */
2860 tcp_do_prr_ack(tp, th, &to, SACK_CHANGE,
2861 &maxseg);
2862 (void) tcp_output(tp);
2863 }
2864 }
2865 /*
2866 * If we reach this point, ACK is not a duplicate,
2867 * i.e., it ACKs something we sent.
2868 */
2869 if (tp->t_flags & TF_NEEDSYN) {
2870 /*
2871 * T/TCP: Connection was half-synchronized, and our
2872 * SYN has been ACK'd (so connection is now fully
2873 * synchronized). Go to non-starred state,
2874 * increment snd_una for ACK of SYN, and check if
2875 * we can do window scaling.
2876 */
2877 tp->t_flags &= ~TF_NEEDSYN;
2878 tp->snd_una++;
2879 /* Do window scaling? */
2880 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2881 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2882 tp->rcv_scale = tp->request_r_scale;
2883 /* Send window already scaled. */
2884 }
2885 }
2886
2887 process_ACK:
2888 INP_WLOCK_ASSERT(inp);
2889
2890 /*
2891 * Adjust for the SYN bit in sequence space,
2892 * but don't account for it in cwnd calculations.
2893 * This is for the SYN_RECEIVED, non-simultaneous
2894 * SYN case. SYN_SENT and simultaneous SYN are
2895 * treated elsewhere.
2896 */
2897 if (incforsyn)
2898 tp->snd_una++;
2899 acked = BYTES_THIS_ACK(tp, th);
2900 KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2901 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2902 tp->snd_una, th->th_ack, tp, m));
2903 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
2904 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2905
2906 /*
2907 * If we just performed our first retransmit, and the ACK
2908 * arrives within our recovery window, then it was a mistake
2909 * to do the retransmit in the first place. Recover our
2910 * original cwnd and ssthresh, and proceed to transmit where
2911 * we left off.
2912 */
2913 if (tp->t_rxtshift == 1 &&
2914 tp->t_flags & TF_PREVVALID &&
2915 tp->t_badrxtwin != 0 &&
2916 to.to_flags & TOF_TS &&
2917 to.to_tsecr != 0 &&
2918 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
2919 cc_cong_signal(tp, th, CC_RTO_ERR);
2920
2921 /*
2922 * If we have a timestamp reply, update smoothed
2923 * round trip time. If no timestamp is present but
2924 * transmit timer is running and timed sequence
2925 * number was acked, update smoothed round trip time.
2926 * Since we now have an rtt measurement, cancel the
2927 * timer backoff (cf., Phil Karn's retransmit alg.).
2928 * Recompute the initial retransmit timer.
2929 *
2930 * Some boxes send broken timestamp replies
2931 * during the SYN+ACK phase, ignore
2932 * timestamps of 0 or we could calculate a
2933 * huge RTT and blow up the retransmit timer.
2934 */
2935 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2936 uint32_t t;
2937
2938 t = tcp_ts_getticks() - to.to_tsecr;
2939 if (!tp->t_rttlow || tp->t_rttlow > t)
2940 tp->t_rttlow = t;
2941 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2942 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2943 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2944 tp->t_rttlow = ticks - tp->t_rtttime;
2945 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2946 }
2947
2948 SOCKBUF_LOCK(&so->so_snd);
2949 /*
2950 * Clear t_acktime if remote side has ACKd all data in the
2951 * socket buffer and FIN (if applicable).
2952 * Otherwise, update t_acktime if we received a sufficiently
2953 * large ACK.
2954 */
2955 if ((tp->t_state <= TCPS_CLOSE_WAIT &&
2956 acked == sbavail(&so->so_snd)) ||
2957 acked > sbavail(&so->so_snd))
2958 tp->t_acktime = 0;
2959 else if (acked > 1)
2960 tp->t_acktime = ticks;
2961
2962 /*
2963 * If all outstanding data is acked, stop retransmit
2964 * timer and remember to restart (more output or persist).
2965 * If there is more data to be acked, restart retransmit
2966 * timer, using current (possibly backed-off) value.
2967 */
2968 if (th->th_ack == tp->snd_max) {
2969 tcp_timer_activate(tp, TT_REXMT, 0);
2970 needoutput = 1;
2971 } else if (!tcp_timer_active(tp, TT_PERSIST))
2972 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
2973
2974 /*
2975 * If no data (only SYN) was ACK'd,
2976 * skip rest of ACK processing.
2977 */
2978 if (acked == 0) {
2979 SOCKBUF_UNLOCK(&so->so_snd);
2980 goto step6;
2981 }
2982
2983 /*
2984 * Let the congestion control algorithm update congestion
2985 * control related information. This typically means increasing
2986 * the congestion window.
2987 */
2988 cc_ack_received(tp, th, nsegs, CC_ACK);
2989
2990 if (acked > sbavail(&so->so_snd)) {
2991 if (tp->snd_wnd >= sbavail(&so->so_snd))
2992 tp->snd_wnd -= sbavail(&so->so_snd);
2993 else
2994 tp->snd_wnd = 0;
2995 mfree = sbcut_locked(&so->so_snd,
2996 (int)sbavail(&so->so_snd));
2997 ourfinisacked = 1;
2998 } else {
2999 mfree = sbcut_locked(&so->so_snd, acked);
3000 if (tp->snd_wnd >= (uint32_t) acked)
3001 tp->snd_wnd -= acked;
3002 else
3003 tp->snd_wnd = 0;
3004 ourfinisacked = 0;
3005 }
3006 /* NB: sowwakeup_locked() does an implicit unlock. */
3007 sowwakeup_locked(so);
3008 m_freem(mfree);
3009 /* Detect una wraparound. */
3010 if (!IN_RECOVERY(tp->t_flags) &&
3011 SEQ_GT(tp->snd_una, tp->snd_recover) &&
3012 SEQ_LEQ(th->th_ack, tp->snd_recover))
3013 tp->snd_recover = th->th_ack - 1;
3014 tp->snd_una = th->th_ack;
3015 if (IN_RECOVERY(tp->t_flags) &&
3016 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
3017 cc_post_recovery(tp, th);
3018 }
3019 if (tp->t_flags & TF_SACK_PERMIT) {
3020 if (SEQ_GT(tp->snd_una, tp->snd_recover))
3021 tp->snd_recover = tp->snd_una;
3022 }
3023 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
3024 tp->snd_nxt = tp->snd_una;
3025
3026 switch (tp->t_state) {
3027 /*
3028 * In FIN_WAIT_1 STATE in addition to the processing
3029 * for the ESTABLISHED state if our FIN is now acknowledged
3030 * then enter FIN_WAIT_2.
3031 */
3032 case TCPS_FIN_WAIT_1:
3033 if (ourfinisacked) {
3034 /*
3035 * If we can't receive any more
3036 * data, then closing user can proceed.
3037 * Starting the timer is contrary to the
3038 * specification, but if we don't get a FIN
3039 * we'll hang forever.
3040 */
3041 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3042 tcp_free_sackholes(tp);
3043 soisdisconnected(so);
3044 tcp_timer_activate(tp, TT_2MSL,
3045 (tcp_fast_finwait2_recycle ?
3046 tcp_finwait2_timeout :
3047 TP_MAXIDLE(tp)));
3048 }
3049 tcp_state_change(tp, TCPS_FIN_WAIT_2);
3050 }
3051 break;
3052
3053 /*
3054 * In CLOSING STATE in addition to the processing for
3055 * the ESTABLISHED state if the ACK acknowledges our FIN
3056 * then enter the TIME-WAIT state, otherwise ignore
3057 * the segment.
3058 */
3059 case TCPS_CLOSING:
3060 if (ourfinisacked) {
3061 tcp_twstart(tp);
3062 m_freem(m);
3063 return;
3064 }
3065 break;
3066
3067 /*
3068 * In LAST_ACK, we may still be waiting for data to drain
3069 * and/or to be acked, as well as for the ack of our FIN.
3070 * If our FIN is now acknowledged, delete the TCB,
3071 * enter the closed state and return.
3072 */
3073 case TCPS_LAST_ACK:
3074 if (ourfinisacked) {
3075 tp = tcp_close(tp);
3076 goto drop;
3077 }
3078 break;
3079 }
3080 }
3081
3082 step6:
3083 INP_WLOCK_ASSERT(inp);
3084
3085 /*
3086 * Update window information.
3087 * Don't look at window if no ACK: TACs send garbage on first SYN.
3088 */
3089 if ((thflags & TH_ACK) &&
3090 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
3091 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
3092 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
3093 /* keep track of pure window updates */
3094 if (tlen == 0 &&
3095 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
3096 TCPSTAT_INC(tcps_rcvwinupd);
3097 tp->snd_wnd = tiwin;
3098 tp->snd_wl1 = th->th_seq;
3099 tp->snd_wl2 = th->th_ack;
3100 if (tp->snd_wnd > tp->max_sndwnd)
3101 tp->max_sndwnd = tp->snd_wnd;
3102 needoutput = 1;
3103 }
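/*
 * The three-part test above is the RFC 793 window update rule:
 * take the window only from a segment that is newer in sequence
 * space (snd_wl1), equally new but with a newer ACK (snd_wl2),
 * or identical on both counts while announcing a larger window.
 * This keeps old, reordered segments from shrinking snd_wnd.
 */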
3104
3105 /*
3106 * Process segments with URG.
3107 */
3108 if ((thflags & TH_URG) && th->th_urp &&
3109 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3110 /*
3111 * This is a kludge, but if we receive and accept
3112 * random urgent pointers, we'll crash in
3113 * soreceive. It's hard to imagine someone
3114 * actually wanting to send this much urgent data.
3115 */
3116 SOCKBUF_LOCK(&so->so_rcv);
3117 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
3118 th->th_urp = 0; /* XXX */
3119 thflags &= ~TH_URG; /* XXX */
3120 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
3121 goto dodata; /* XXX */
3122 }
3123 /*
3124 * If this segment advances the known urgent pointer,
3125 * then mark the data stream. This should not happen
3126 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3127 * a FIN has been received from the remote side.
3128 * In these states we ignore the URG.
3129 *
3130 * According to RFC961 (Assigned Protocols),
3131 * the urgent pointer points to the last octet
3132 * of urgent data. We continue, however,
3133 * to consider it to indicate the first octet
3134 * of data past the urgent section as the original
3135 * spec states (in one of two places).
3136 */
3137 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3138 tp->rcv_up = th->th_seq + th->th_urp;
3139 so->so_oobmark = sbavail(&so->so_rcv) +
3140 (tp->rcv_up - tp->rcv_nxt) - 1;
3141 if (so->so_oobmark == 0)
3142 so->so_rcv.sb_state |= SBS_RCVATMARK;
3143 sohasoutofband(so);
3144 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3145 }
3146 SOCKBUF_UNLOCK(&so->so_rcv);
3147 /*
3148 * Remove out-of-band data so it doesn't get presented to
3149 * the user. This can happen independently of advancing the
3150 * URG pointer, but if two URGs are pending at once, some
3151 * out-of-band data may creep in... ick.
3152 */
3153 if (th->th_urp <= (uint32_t)tlen &&
3154 !(so->so_options & SO_OOBINLINE)) {
3155 /* hdr drop is delayed */
3156 tcp_pulloutofband(so, th, m, drop_hdrlen);
3157 }
3158 } else {
3159 /*
3160 * If no out of band data is expected,
3161 * pull receive urgent pointer along
3162 * with the receive window.
3163 */
3164 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3165 tp->rcv_up = tp->rcv_nxt;
3166 }
3167 dodata: /* XXX */
3168 INP_WLOCK_ASSERT(inp);
3169
3170 /*
3171 * Process the segment text, merging it into the TCP sequencing queue,
3172 * and arranging for acknowledgment of receipt if necessary.
3173 * This process logically involves adjusting tp->rcv_wnd as data
3174 * is presented to the user (this happens in tcp_usrreq.c,
3175 * case PRU_RCVD). If a FIN has already been received on this
3176 * connection then we just ignore the text.
3177 */
3178 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3179 (tp->t_flags & TF_FASTOPEN));
3180 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
3181 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3182 tcp_seq save_start = th->th_seq;
3183 tcp_seq save_rnxt = tp->rcv_nxt;
3184 int save_tlen = tlen;
3185 m_adj(m, drop_hdrlen); /* delayed header drop */
3186 /*
3187 * Insert segment which includes th into TCP reassembly queue
3188 * with control block tp. Set thflags to whether reassembly now
3189 * includes a segment with FIN. This handles the common case
3190 * inline (segment is the next to be received on an established
3191 * connection, and the queue is empty), avoiding linkage into
3192 * and removal from the queue and repetition of various
3193 * conversions.
3194 * Set DELACK for segments received in order, but ack
3195 * immediately when segments are out of order (so
3196 * fast retransmit can work).
3197 */
3198 if (th->th_seq == tp->rcv_nxt &&
3199 SEGQ_EMPTY(tp) &&
3200 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3201 tfo_syn)) {
3202 if (DELAY_ACK(tp, tlen) || tfo_syn)
3203 tp->t_flags |= TF_DELACK;
3204 else
3205 tp->t_flags |= TF_ACKNOW;
3206 tp->rcv_nxt += tlen;
3207 if (tlen &&
3208 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
3209 (tp->t_fbyte_in == 0)) {
3210 tp->t_fbyte_in = ticks;
3211 if (tp->t_fbyte_in == 0)
3212 tp->t_fbyte_in = 1;
3213 if (tp->t_fbyte_out && tp->t_fbyte_in)
3214 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
3215 }
3216 thflags = tcp_get_flags(th) & TH_FIN;
3217 TCPSTAT_INC(tcps_rcvpack);
3218 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3219 SOCKBUF_LOCK(&so->so_rcv);
3220 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3221 m_freem(m);
3222 else
3223 sbappendstream_locked(&so->so_rcv, m, 0);
3224 tp->t_flags |= TF_WAKESOR;
3225 } else {
3226 /*
3227 * XXX: Due to the header drop above, "th" is
3228 * theoretically invalid by now. Fortunately
3229 * m_adj() doesn't actually free any mbufs
3230 * when trimming from the head.
3231 */
3232 tcp_seq temp = save_start;
3233
3234 thflags = tcp_reass(tp, th, &temp, &tlen, m);
3235 tp->t_flags |= TF_ACKNOW;
3236 }
3237 if ((tp->t_flags & TF_SACK_PERMIT) &&
3238 (save_tlen > 0) &&
3239 TCPS_HAVEESTABLISHED(tp->t_state)) {
3240 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
3241 /*
3242 * DSACK actually handled in the fastpath
3243 * above.
3244 */
3245 tcp_update_sack_list(tp, save_start,
3246 save_start + save_tlen);
3247 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
3248 if ((tp->rcv_numsacks >= 1) &&
3249 (tp->sackblks[0].end == save_start)) {
3250 /*
3251 * Partial overlap, recorded at todrop
3252 * above.
3253 */
3254 tcp_update_sack_list(tp,
3255 tp->sackblks[0].start,
3256 tp->sackblks[0].end);
3257 } else {
3258 tcp_update_dsack_list(tp, save_start,
3259 save_start + save_tlen);
3260 }
3261 } else if (tlen >= save_tlen) {
3262 /* Update of sackblks. */
3263 tcp_update_dsack_list(tp, save_start,
3264 save_start + save_tlen);
3265 } else if (tlen > 0) {
3266 tcp_update_dsack_list(tp, save_start,
3267 save_start + tlen);
3268 }
3269 }
3270 tcp_handle_wakeup(tp);
3271 #if 0
3272 /*
3273 * Note the amount of data that peer has sent into
3274 * our window, in order to estimate the sender's
3275 * buffer size.
3276 * XXX: Unused.
3277 */
3278 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3279 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3280 else
3281 len = so->so_rcv.sb_hiwat;
3282 #endif
3283 } else {
3284 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
3285 if (tlen > 0) {
3286 if ((thflags & TH_FIN) != 0) {
3287 log(LOG_DEBUG, "%s; %s: %s: "
3288 "Received %d bytes of data and FIN "
3289 "after having received a FIN, "
3290 "just dropping both\n",
3291 s, __func__,
3292 tcpstates[tp->t_state], tlen);
3293 } else {
3294 log(LOG_DEBUG, "%s; %s: %s: "
3295 "Received %d bytes of data "
3296 "after having received a FIN, "
3297 "just dropping it\n",
3298 s, __func__,
3299 tcpstates[tp->t_state], tlen);
3300 }
3301 } else {
3302 if ((thflags & TH_FIN) != 0) {
3303 log(LOG_DEBUG, "%s; %s: %s: "
3304 "Received FIN "
3305 "after having received a FIN, "
3306 "just dropping it\n",
3307 s, __func__,
3308 tcpstates[tp->t_state]);
3309 }
3310 }
3311 free(s, M_TCPLOG);
3312 }
3313 m_freem(m);
3314 thflags &= ~TH_FIN;
3315 }
3316
3317 /*
3318 * If FIN is received ACK the FIN and let the user know
3319 * that the connection is closing.
3320 */
3321 if (thflags & TH_FIN) {
3322 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3323 /* The socket upcall is handled by socantrcvmore. */
3324 socantrcvmore(so);
3325 /*
3326 * If connection is half-synchronized
3327 * (ie NEEDSYN flag on) then delay ACK,
3328 * so it may be piggybacked when SYN is sent.
3329 * Otherwise, since we received a FIN then no
3330 * more input can be expected, send ACK now.
3331 */
3332 if (tp->t_flags & TF_NEEDSYN)
3333 tp->t_flags |= TF_DELACK;
3334 else
3335 tp->t_flags |= TF_ACKNOW;
3336 tp->rcv_nxt++;
3337 }
3338 switch (tp->t_state) {
3339 /*
3340 * In SYN_RECEIVED and ESTABLISHED STATES
3341 * enter the CLOSE_WAIT state.
3342 */
3343 case TCPS_SYN_RECEIVED:
3344 tp->t_starttime = ticks;
3345 /* FALLTHROUGH */
3346 case TCPS_ESTABLISHED:
3347 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3348 break;
3349
3350 /*
3351 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3352 * enter the CLOSING state.
3353 */
3354 case TCPS_FIN_WAIT_1:
3355 tcp_state_change(tp, TCPS_CLOSING);
3356 break;
3357
3358 /*
3359 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3360 * starting the time-wait timer, turning off the other
3361 * standard timers.
3362 */
3363 case TCPS_FIN_WAIT_2:
3364 tcp_twstart(tp);
3365 return;
3366 }
3367 }
3368 TCP_PROBE3(debug__input, tp, th, m);
3369
3370 /*
3371 * Return any desired output.
3372 */
3373 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
3374 (void) tcp_output(tp);
3375 }
3376 check_delack:
3377 INP_WLOCK_ASSERT(inp);
3378
3379 if (tp->t_flags & TF_DELACK) {
3380 tp->t_flags &= ~TF_DELACK;
3381 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3382 }
3383 INP_WUNLOCK(inp);
3384 return;
3385
3386 dropafterack:
3387 /*
3388 * Generate an ACK dropping incoming segment if it occupies
3389 * sequence space, where the ACK reflects our state.
3390 *
3391 * We can now skip the test for the RST flag since all
3392 * paths to this code happen after packets containing
3393 * RST have been dropped.
3394 *
3395 * In the SYN-RECEIVED state, don't send an ACK unless the
3396 * segment we received passes the SYN-RECEIVED ACK test.
3397 * If it fails send a RST. This breaks the loop in the
3398 * "LAND" DoS attack, and also prevents an ACK storm
3399 * between two listening ports that have been sent forged
3400 * SYN segments, each with the source address of the other.
3401 */
3402 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3403 (SEQ_GT(tp->snd_una, th->th_ack) ||
3404 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3405 rstreason = BANDLIM_RST_OPENPORT;
3406 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
3407 goto dropwithreset;
3408 }
3409 TCP_PROBE3(debug__input, tp, th, m);
3410 tp->t_flags |= TF_ACKNOW;
3411 (void) tcp_output(tp);
3412 INP_WUNLOCK(inp);
3413 m_freem(m);
3414 return;
3415
3416 dropwithreset:
3417 if (tp != NULL) {
3418 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3419 INP_WUNLOCK(inp);
3420 } else
3421 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3422 return;
3423
3424 drop:
3425 /*
3426 * Drop space held by incoming segment and return.
3427 */
3428 TCP_PROBE3(debug__input, tp, th, m);
3429 if (tp != NULL) {
3430 INP_WUNLOCK(inp);
3431 }
3432 m_freem(m);
3433 }
3434
3435 /*
3436 * Issue RST and make ACK acceptable to originator of segment.
3437 * The mbuf must still include the original packet header.
3438 * tp may be NULL.
3439 */
3440 void
3441 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3442 int tlen, int rstreason)
3443 {
3444 #ifdef INET
3445 struct ip *ip;
3446 #endif
3447 #ifdef INET6
3448 struct ip6_hdr *ip6;
3449 #endif
3450
3451 if (tp != NULL) {
3452 INP_LOCK_ASSERT(tptoinpcb(tp));
3453 }
3454
3455 /* Never respond to a RST segment, nor if the destination was broadcast/multicast. */
3456 if ((tcp_get_flags(th) & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3457 goto drop;
3458 #ifdef INET6
3459 if (mtod(m, struct ip *)->ip_v == 6) {
3460 ip6 = mtod(m, struct ip6_hdr *);
3461 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3462 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3463 goto drop;
3464 /* IPv6 anycast check is done at tcp6_input() */
3465 }
3466 #endif
3467 #if defined(INET) && defined(INET6)
3468 else
3469 #endif
3470 #ifdef INET
3471 {
3472 ip = mtod(m, struct ip *);
3473 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3474 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3475 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3476 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3477 goto drop;
3478 }
3479 #endif
3480
3481 /* Perform bandwidth limiting. */
3482 if (badport_bandlim(rstreason) < 0)
3483 goto drop;
3484
3485 /* tcp_respond consumes the mbuf chain. */
3486 if (tcp_get_flags(th) & TH_ACK) {
3487 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3488 th->th_ack, TH_RST);
3489 } else {
3490 if (tcp_get_flags(th) & TH_SYN)
3491 tlen++;
3492 if (tcp_get_flags(th) & TH_FIN)
3493 tlen++;
3494 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3495 (tcp_seq)0, TH_RST|TH_ACK);
3496 }
3497 return;
3498 drop:
3499 m_freem(m);
3500 }
3501
3502 /*
3503 * Parse TCP options and place in tcpopt.
3504 */
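/*
 * For reference, the loop below walks kind/length/data triples:
 * an MSS of 1460 in a SYN appears on the wire as
 * 0x02 0x04 0x05 0xb4, window scale as 0x03 0x03 <shift> and a
 * timestamp option as 0x08 0x0a followed by two 32-bit
 * big-endian words (tsval, tsecr). EOL (0) and NOP (1) are the
 * only single-octet kinds, hence their special casing.
 */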
3505 void
3506 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3507 {
3508 int opt, optlen;
3509
3510 to->to_flags = 0;
3511 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3512 opt = cp[0];
3513 if (opt == TCPOPT_EOL)
3514 break;
3515 if (opt == TCPOPT_NOP)
3516 optlen = 1;
3517 else {
3518 if (cnt < 2)
3519 break;
3520 optlen = cp[1];
3521 if (optlen < 2 || optlen > cnt)
3522 break;
3523 }
3524 switch (opt) {
3525 case TCPOPT_MAXSEG:
3526 if (optlen != TCPOLEN_MAXSEG)
3527 continue;
3528 if (!(flags & TO_SYN))
3529 continue;
3530 to->to_flags |= TOF_MSS;
3531 bcopy((char *)cp + 2,
3532 (char *)&to->to_mss, sizeof(to->to_mss));
3533 to->to_mss = ntohs(to->to_mss);
3534 break;
3535 case TCPOPT_WINDOW:
3536 if (optlen != TCPOLEN_WINDOW)
3537 continue;
3538 if (!(flags & TO_SYN))
3539 continue;
3540 to->to_flags |= TOF_SCALE;
3541 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3542 break;
3543 case TCPOPT_TIMESTAMP:
3544 if (optlen != TCPOLEN_TIMESTAMP)
3545 continue;
3546 to->to_flags |= TOF_TS;
3547 bcopy((char *)cp + 2,
3548 (char *)&to->to_tsval, sizeof(to->to_tsval));
3549 to->to_tsval = ntohl(to->to_tsval);
3550 bcopy((char *)cp + 6,
3551 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3552 to->to_tsecr = ntohl(to->to_tsecr);
3553 break;
3554 case TCPOPT_SIGNATURE:
3555 /*
3556 * In order to reply to a host which has set the
3557 * TCP_SIGNATURE option in its initial SYN, we have
3558 * to record the fact that the option was observed
3559 * here for the syncache code to perform the correct
3560 * response.
3561 */
3562 if (optlen != TCPOLEN_SIGNATURE)
3563 continue;
3564 to->to_flags |= TOF_SIGNATURE;
3565 to->to_signature = cp + 2;
3566 break;
3567 case TCPOPT_SACK_PERMITTED:
3568 if (optlen != TCPOLEN_SACK_PERMITTED)
3569 continue;
3570 if (!(flags & TO_SYN))
3571 continue;
3572 if (!V_tcp_do_sack)
3573 continue;
3574 to->to_flags |= TOF_SACKPERM;
3575 break;
3576 case TCPOPT_SACK:
3577 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3578 continue;
3579 if (flags & TO_SYN)
3580 continue;
3581 to->to_flags |= TOF_SACK;
3582 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3583 to->to_sacks = cp + 2;
3584 TCPSTAT_INC(tcps_sack_rcv_blocks);
3585 break;
3586 case TCPOPT_FAST_OPEN:
3587 /*
3588 * Cookie length validation is performed by the
3589 * server side cookie checking code or the client
3590 * side cookie cache update code.
3591 */
3592 if (!(flags & TO_SYN))
3593 continue;
3594 if (!V_tcp_fastopen_client_enable &&
3595 !V_tcp_fastopen_server_enable)
3596 continue;
3597 to->to_flags |= TOF_FASTOPEN;
3598 to->to_tfo_len = optlen - 2;
3599 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3600 break;
3601 default:
3602 continue;
3603 }
3604 }
3605 }
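
/*
 * Illustrative sketch of how tcp_dooptions() walks a typical SYN
 * option block.  Given the eight option bytes below, the loop records
 * an MSS of 1460 (0x05b4) and a window scale of 7, then stops at the
 * end-of-list marker:
 *
 *	u_char opts[] = {
 *		TCPOPT_MAXSEG, TCPOLEN_MAXSEG, 0x05, 0xb4,
 *		TCPOPT_WINDOW, TCPOLEN_WINDOW, 7,
 *		TCPOPT_EOL
 *	};
 *	struct tcpopt to;
 *
 *	tcp_dooptions(&to, opts, sizeof(opts), TO_SYN);
 *
 * Afterwards TOF_MSS and TOF_SCALE are set in to.to_flags, with
 * to.to_mss == 1460 and to.to_wscale == 7.  Without TO_SYN in flags,
 * both options would have been skipped.
 */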

/*
 * Pull the out-of-band byte out of a segment so that it doesn't
 * appear in the user's data queue.  It is still reflected in the
 * segment length for sequencing purposes.
 */
void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
    int off)
{
	int cnt = off + th->th_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			INP_WLOCK_ASSERT(tptoinpcb(tp));

			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
			m->m_len--;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	panic("tcp_pulloutofband");
}
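
/*
 * Example: if the segment's data begins off bytes into the mbuf chain
 * and the peer sent an urgent pointer of 1, then cnt == off + 1 - 1,
 * i.e. the out-of-band byte is the first data byte.  It is saved in
 * tp->t_iobc, the remaining data is shifted down one byte, and the
 * mbuf (and packet header) length shrinks by one, so the byte never
 * enters the ordinary receive queue.
 */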

/*
 * Collect a new round-trip time estimate
 * and update averages and current timeout.
 */
void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
	int delta;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	TCPSTAT_INC(tcps_rttupdated);
	if (tp->t_rttupdated < UCHAR_MAX)
		tp->t_rttupdated++;
#ifdef STATS
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT,
	    imax(0, rtt * 1000 / hz));
#endif
	if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) {
		/*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
		    - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;

		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
	}
	tp->t_rtttime = 0;
	tp->t_rxtshift = 0;

	/*
	 * The retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}
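
/*
 * Worked example of the smoothing above, using the stock shift
 * constants (TCP_RTT_SHIFT == 5, TCP_DELTA_SHIFT == 2): suppose
 * t_srtt == 192 (6.0 ticks scaled by 32) and a new measurement of
 * rtt == 9 ticks arrives.  Then
 *
 *	delta  = ((9 - 1) << 2) - (192 >> 3) = 32 - 24 = 8
 *	t_srtt = 192 + 8 = 200		(6.25 ticks scaled by 32)
 *
 * which is exactly 7/8 * 6.0 + 1/8 * (9 - 1) = 6.25 ticks, i.e. the
 * alpha = .875 smoother described above, applied to the origin-0 rtt.
 */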

/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing interface
 * without forcing IP to fragment.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * NOTE that the resulting t_maxseg doesn't include space for TCP options or
 * IP options, e.g. IPSEC data, since the length of this data may vary, and
 * thus it is calculated for every segment separately in tcp_output().
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, or an ICMP need fragmentation datagram.  Outgoing SYN/ACK MSS
 * settings are handled in tcp_mssopt().
 */
void
tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
    struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
{
	int mss = 0;
	uint32_t maxmtu = 0;
	struct inpcb *inp = tptoinpcb(tp);
	struct hc_metrics_lite metrics;
#ifdef INET6
	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
	size_t min_protoh = isipv6 ?
	    sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
	    sizeof (struct tcpiphdr);
#else
	size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	INP_WLOCK_ASSERT(inp);

	if (tp->t_port)
		min_protoh += V_tcp_udp_tunneling_overhead;
	if (mtuoffer != -1) {
		KASSERT(offer == -1, ("%s: conflict", __func__));
		offer = mtuoffer - min_protoh;
	}

	/* Initialize. */
#ifdef INET6
	if (isipv6) {
		maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
		tp->t_maxseg = V_tcp_v6mssdflt;
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
		tp->t_maxseg = V_tcp_mssdflt;
	}
#endif

	/*
	 * No route to sender, stay with default mss and return.
	 */
	if (maxmtu == 0) {
		/*
		 * In case we return early we need to initialize metrics
		 * to a defined state as tcp_hc_get() would do for us
		 * if there was no cache hit.
		 */
		if (metricptr != NULL)
			bzero(metricptr, sizeof(struct hc_metrics_lite));
		return;
	}

	/* What have we got? */
	switch (offer) {
	case 0:
		/*
		 * Offer == 0 means that there was no MSS on the SYN
		 * segment, in this case we use tcp_mssdflt as
		 * already assigned to t_maxseg above.
		 */
		offer = tp->t_maxseg;
		break;

	case -1:
		/*
		 * Offer == -1 means that we didn't receive SYN yet.
		 */
		/* FALLTHROUGH */

	default:
		/*
		 * Prevent DoS attack with too small MSS.  Round up
		 * to at least minmss.
		 */
		offer = max(offer, V_tcp_minmss);
	}

	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);
	if (metricptr != NULL)
		bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

	/*
	 * If there's a discovered mtu in tcp hostcache, use it.
	 * Else, use the link mtu.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, V_tcp_v6mssdflt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, V_tcp_mssdflt);
		}
#endif
		/*
		 * XXX - The above conditional (mss = maxmtu - min_protoh)
		 * probably violates the TCP spec.
		 * The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
	}
	mss = min(mss, offer);

	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	mss = max(mss, 64);

	tp->t_maxseg = mss;
	if (tp->t_maxseg < V_tcp_mssdflt) {
		/*
		 * The MSS is so small we should not process incoming
		 * SACKs since we are subject to attack in such a
		 * case.
		 */
		tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
	} else {
		tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT;
	}
}
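
/*
 * Example: for an IPv4 connection across a 1500-byte MTU path with no
 * hostcache entry, maxmtu == 1500 and min_protoh == 40, giving
 * mss == 1460.  A smaller peer offer, say 1400, reduces it further via
 * the min() above; an absurdly small offer is first raised to
 * V_tcp_minmss, and the result is never allowed below 64 bytes so that
 * option-laden segments can still carry some data.
 */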

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat differs from the default (then it
	 * has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(so, SO_SND, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);
	if (tp->t_maxseg < V_tcp_mssdflt) {
		/*
		 * The MSS is so small we should not process incoming
		 * SACKs since we are subject to attack in such a
		 * case.
		 */
		tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
	} else {
		tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT;
	}

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(so, SO_RCV, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}
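
/*
 * Example: for an IPv4 destination reached over a 1500-byte MTU
 * interface with no cached path MTU, maxmtu == 1500 and thcmtu == 0,
 * so the advertised MSS is max(1500, 0) - 40 == 1460.  Had the
 * hostcache recorded a smaller path MTU of 1400, the min() branch
 * would yield 1400 - 40 == 1360 instead.
 */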

void
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to,
    sackstatus_t sack_changed, u_int *maxsegp)
{
	int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
	u_int maxseg;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (*maxsegp == 0) {
		*maxsegp = tcp_maxseg(tp);
	}
	maxseg = *maxsegp;
	/*
	 * Compute the amount of data that this ACK is indicating
	 * (del_data) and an estimate of how many bytes are in the
	 * network.
	 */
	if (tcp_is_sack_recovery(tp, to) ||
	    (IN_CONGRECOVERY(tp->t_flags) &&
	    !IN_FASTRECOVERY(tp->t_flags))) {
		del_data = tp->sackhint.delivered_data;
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(tp);
		else
			pipe = (tp->snd_nxt - tp->snd_fack) +
			    tp->sackhint.sack_bytes_rexmit;
	} else {
		if (tp->sackhint.prr_delivered < (tcprexmtthresh * maxseg +
		    tp->snd_recover - tp->snd_una)) {
			del_data = maxseg;
		}
		pipe = imax(0, tp->snd_max - tp->snd_una -
		    imin(INT_MAX / 65536, tp->t_dupacks) * maxseg);
	}
	tp->sackhint.prr_delivered += del_data;
	/*
	 * Proportional Rate Reduction
	 */
	if (pipe >= tp->snd_ssthresh) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs =
			    imax(1, tp->snd_nxt - tp->snd_una);
		snd_cnt = howmany((long)tp->sackhint.prr_delivered *
		    tp->snd_ssthresh, tp->sackhint.recover_fs) -
		    tp->sackhint.prr_out + maxseg - 1;
	} else {
		/*
		 * PRR 6937bis heuristic:
		 * - A partial ack without a SACK block beneath snd_recover
		 *   indicates further loss.
		 * - A SACK scoreboard update adding a new hole indicates
		 *   further loss, so be conservative and send at most one
		 *   segment.
		 * - Prevent ACK splitting attacks by being conservative
		 *   when no new data is acked.
		 */
		if ((sack_changed == SACK_NEWLOSS) || (del_data == 0)) {
			limit = tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out;
		} else {
			limit = imax(tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out, del_data) +
			    maxseg;
		}
		snd_cnt = imin((tp->snd_ssthresh - pipe), limit);
	}
	snd_cnt = imax(snd_cnt, 0) / maxseg;
	/*
	 * Send snd_cnt new data into the network in response to this ack.
	 * If there is going to be a SACK retransmission, adjust snd_cwnd
	 * accordingly.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tcp_is_sack_recovery(tp, to)) {
			tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
			    tp->sackhint.sack_bytes_rexmit +
			    (snd_cnt * maxseg);
		} else {
			tp->snd_cwnd = (tp->snd_max - tp->snd_una) +
			    (snd_cnt * maxseg);
		}
	} else if (IN_CONGRECOVERY(tp->t_flags)) {
		tp->snd_cwnd = pipe - del_data + (snd_cnt * maxseg);
	}
	tp->snd_cwnd = imax(maxseg, tp->snd_cwnd);
}
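
/*
 * Worked example of the PRR computation above (illustrative numbers):
 * with maxseg == 1000, snd_ssthresh == 10000, recover_fs == 20000,
 * prr_delivered == 2000 and prr_out == 0, the pipe >= ssthresh branch
 * gives
 *
 *	snd_cnt = howmany(2000 * 10000, 20000) - 0 + 1000 - 1
 *	        = 1999 bytes, i.e. 1 segment after dividing by maxseg,
 *
 * so roughly ssthresh/recover_fs (here one half) of every newly
 * delivered byte is matched by new data sent into the network.
 */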

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on fact that tp->snd_una
	 * not updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	if (tp->t_fb->tfb_compute_pipe == NULL) {
		return (tp->snd_max - tp->snd_una +
		    tp->sackhint.sack_bytes_rexmit -
		    tp->sackhint.sacked_bytes -
		    tp->sackhint.lost_bytes);
	} else {
		return ((*tp->t_fb->tfb_compute_pipe)(tp));
	}
}
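
/*
 * Example: with 10000 bytes outstanding (snd_max - snd_una), 2000
 * bytes retransmitted during SACK recovery, 3000 bytes SACKed and
 * 1000 bytes counted as lost, the default estimate above is
 * 10000 + 2000 - 3000 - 1000 == 8000 bytes in the network.
 */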

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as the Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size
	 * (V_tcp_initcwnd_segments) takes precedence over both.
	 */
	if (V_tcp_initcwnd_segments)
		return (min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460)));
	else if (V_tcp_do_rfc3390)
		return (min(4 * maxseg, max(2 * maxseg, 4380)));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}

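/*
 * Example: for maxseg == 1460, an RFC6928-style setting of ten
 * segments yields min(10 * 1460, max(2 * 1460, 10 * 1460)) == 14600
 * bytes; RFC3390 yields min(4 * 1460, max(2 * 1460, 4380)) == 4380
 * bytes; and the RFC5681 fallback selects the 3 * maxseg == 4380 byte
 * tier, since 1095 < 1460 <= 2190.
 */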