/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.20 2003/01/29 22:45:36 hsu Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <net/if_var.h>
#include <net/route.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

#ifdef notyet
extern struct mbuf *m_copypack();
#endif

int path_mtu_discovery = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
	&path_mtu_discovery, 1, "Enable Path MTU Discovery");

static int avoid_pure_win_update = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, avoid_pure_win_update, CTLFLAG_RW,
	&avoid_pure_win_update, 1, "Avoid pure window updates when possible");

/*
 * 1 - enabled for increasing and decreasing the buffer size
 * 2 - enabled only for increasing the buffer size
 */
int tcp_do_autosndbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
	&tcp_do_autosndbuf, 0, "Enable automatic send buffer sizing");

int tcp_autosndbuf_inc = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
	&tcp_autosndbuf_inc, 0, "Increment step size of automatic send buffer");

int tcp_autosndbuf_min = 32768;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_min, CTLFLAG_RW,
	&tcp_autosndbuf_min, 0, "Min size of automatic send buffer");

int tcp_autosndbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
	&tcp_autosndbuf_max, 0, "Max size of automatic send buffer");

int tcp_prio_synack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, prio_synack, CTLFLAG_RW,
	&tcp_prio_synack, 0, "Prioritize SYN, SYN|ACK and pure ACK");

static int tcp_idle_cwv = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_cwv, CTLFLAG_RW,
	&tcp_idle_cwv, 0,
	"Congestion window validation after idle period (part of RFC2861)");

static int tcp_idle_restart = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_restart, CTLFLAG_RW,
	&tcp_idle_restart, 0, "Reset congestion window after idle period");

static int tcp_do_tso = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW,
	&tcp_do_tso, 0, "Enable TCP Segmentation Offload (TSO)");

static int tcp_fairsend = 4;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fairsend, CTLFLAG_RW,
	&tcp_fairsend, 0,
	"Number of segments sent before yielding to other senders or receivers");
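
/*
 * Tuning note (illustrative): each SYSCTL_INT above exports its variable
 * under the net.inet.tcp sysctl tree, so e.g. automatic send buffer sizing
 * could be switched to grow-only mode at runtime with something like
 *
 *	sysctl net.inet.tcp.sendbuf_auto=2
 *
 * The node names follow from the OID arguments; the values shown here are
 * only examples.
 */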

static void	tcp_idle_cwnd_validate(struct tcpcb *);

static int	tcp_tso_getsize(struct tcpcb *tp, u_int *segsz, u_int *hlen);
static void	tcp_output_sched(struct tcpcb *tp);

/*
 * TCP output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct inpcb * const inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	long len, recvwin, sendwin;
	int nsacked = 0;
	int off, flags, error = 0;
#ifdef TCP_SIGNATURE
	int sigoff = 0;
#endif
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned int ipoptlen, optlen, hdrlen;
	int idle;
	boolean_t sendalot;
	struct ip6_hdr *ip6;
#ifdef INET6
	const boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif
	boolean_t can_tso = FALSE, use_tso;
	boolean_t report_sack, idle_cwv = FALSE;
	u_int segsz, tso_hlen, tso_lenmax = 0;
	int segcnt = 0;
	boolean_t need_sched = FALSE;

	KKASSERT(so->so_port == &curthread->td_msgport);

	/*
	 * Determine the length of data that should be transmitted and the
	 * flags that will be used.
	 * If there is some data or critical controls (SYN, RST) to send,
	 * then transmit; otherwise, investigate further.
	 */

	/*
	 * If we have been idle for a while, the send congestion window
	 * may no longer be representative of the current state of the
	 * link; we need to validate the congestion window.  However, we
	 * should not perform congestion window validation here, since we
	 * could be asked to send a pure ACK.
	 */
	if (tp->snd_max == tp->snd_una &&
	    (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart)
		idle_cwv = TRUE;

	/*
	 * Calculate whether the transmit stream was previously idle
	 * and adjust TF_LASTIDLE for the next time.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (tp->t_flags & TF_MORETOCOME))
		tp->t_flags |= TF_LASTIDLE;
	else
		tp->t_flags &= ~TF_LASTIDLE;

	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp))
		nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);

	/*
	 * Find out whether TSO could be used or not
	 *
	 * For TSO capable devices, the following assumptions apply to
	 * the processing of TCP flags:
	 * - If FIN is set on the large TCP segment, the device must set
	 *   FIN on the last segment that it creates from the large TCP
	 *   segment.
	 * - If PUSH is set on the large TCP segment, the device must set
	 *   PUSH on the last segment that it creates from the large TCP
	 *   segment.
	 */
	if (tcp_do_tso
#ifdef TCP_SIGNATURE
	    && (tp->t_flags & TF_SIGNATURE) == 0
#endif
	) {
		if (!isipv6) {
			struct rtentry *rt = inp->inp_route.ro_rt;

			if (rt != NULL && (rt->rt_flags & RTF_UP) &&
			    (rt->rt_ifp->if_hwassist & CSUM_TSO)) {
				can_tso = TRUE;
				tso_lenmax = rt->rt_ifp->if_tsolen;
			}
		}
	}

again:
	m = NULL;
	ip = NULL;
	th = NULL;
	ip6 = NULL;

	if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
	    TF_SACK_PERMITTED &&
	    (!TAILQ_EMPTY(&tp->t_segq) ||
	     tp->reportblk.rblk_start != tp->reportblk.rblk_end))
		report_sack = TRUE;
	else
		report_sack = FALSE;

	/* Make use of SACK information when slow-starting after an RTO. */
	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp)) {
		tcp_seq old_snd_nxt = tp->snd_nxt;

		tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
		nsacked += tp->snd_nxt - old_snd_nxt;
	}

	sendalot = FALSE;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
	sendwin = min(sendwin, tp->snd_bwnd);
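
	/*
	 * Illustrative (hypothetical numbers): with snd_wnd = 64KB
	 * advertised by the peer, snd_cwnd = 8KB and snd_bwnd = 32KB,
	 * the two clamps above leave sendwin = 8KB; whichever of the
	 * peer's window, the congestion window (widened by SACKed
	 * bytes) and the bandwidth-delay window is smallest governs.
	 */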

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCE) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.ssb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_callout_stop(tp, tp->tt_persist);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * A negative length can also occur when we are in the
	 * TCPS_SYN_RECEIVED state due to a simultaneous connect where
	 * our SYN has not been acked yet.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 */
	len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;

	/*
	 * Lop off the SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if the segment contains data, suppress
	 * sending the segment (sending the segment would be an option if we
	 * still did TAO and the remote host supported it).
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
			tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
			return 0;
		}
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if (flags & TH_SYN) {
		len = 0;
		flags &= ~TH_FIN;
	}

	if (len < 0) {
		/*
		 * A negative len can occur if our FIN has been sent but not
		 * acked, or if we are in a simultaneous connect in the
		 * TCPS_SYN_RECEIVED state with our SYN sent but not yet
		 * acked.
		 *
		 * If our window has contracted to 0 in the FIN case
		 * (which can only occur if we have NOT been called to
		 * retransmit as per code a few paragraphs up) then we
		 * want to shift the retransmit timer over to the
		 * persist timer.
		 *
		 * However, if we are in the TCPS_SYN_RECEIVED state
		 * (the SYN case) we will be in a simultaneous connect and
		 * the window may be zero degeneratively.  In this case we
		 * do not want to shift to the persist timer after the SYN
		 * or the SYN+ACK transmission.
		 */
		len = 0;
		if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_callout_active(tp, tp->tt_persist))
				tcp_setpersist(tp);
		}
	}

	KASSERT(len >= 0, ("%s: len < 0", __func__));
382 * 383 * If our window has contracted to 0 in the FIN case 384 * (which can only occur if we have NOT been called to 385 * retransmit as per code a few paragraphs up) then we 386 * want to shift the retransmit timer over to the 387 * persist timer. 388 * 389 * However, if we are in the TCPS_SYN_RECEIVED state 390 * (the SYN case) we will be in a simultanious connect and 391 * the window may be zero degeneratively. In this case we 392 * do not want to shift to the persist timer after the SYN 393 * or the SYN+ACK transmission. 394 */ 395 len = 0; 396 if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) { 397 tcp_callout_stop(tp, tp->tt_rexmt); 398 tp->t_rxtshift = 0; 399 tp->snd_nxt = tp->snd_una; 400 if (!tcp_callout_active(tp, tp->tt_persist)) 401 tcp_setpersist(tp); 402 } 403 } 404 405 KASSERT(len >= 0, ("%s: len < 0", __func__)); 406 /* 407 * Automatic sizing of send socket buffer. Often the send buffer 408 * size is not optimally adjusted to the actual network conditions 409 * at hand (delay bandwidth product). Setting the buffer size too 410 * small limits throughput on links with high bandwidth and high 411 * delay (eg. trans-continental/oceanic links). Setting the 412 * buffer size too big consumes too much real kernel memory, 413 * especially with many connections on busy servers. 414 * 415 * The criteria to step up the send buffer one notch are: 416 * 1. receive window of remote host is larger than send buffer 417 * (with a fudge factor of 5/4th); 418 * 2. hiwat has not significantly exceeded bwnd (inflight) 419 * (bwnd is a maximal value if inflight is disabled). 420 * 3. send buffer is filled to 7/8th with data (so we actually 421 * have data to make use of it); 422 * 4. hiwat has not hit maximal automatic size; 423 * 5. our send window (slow start and cogestion controlled) is 424 * larger than sent but unacknowledged data in send buffer. 425 * 426 * The remote host receive window scaling factor may limit the 427 * growing of the send buffer before it reaches its allowed 428 * maximum. 429 * 430 * It scales directly with slow start or congestion window 431 * and does at most one step per received ACK. This fast 432 * scaling has the drawback of growing the send buffer beyond 433 * what is strictly necessary to make full use of a given 434 * delay*bandwith product. However testing has shown this not 435 * to be much of an problem. At worst we are trading wasting 436 * of available bandwith (the non-use of it) for wasting some 437 * socket buffer memory. 438 * 439 * The criteria for shrinking the buffer is based solely on 440 * the inflight code (snd_bwnd). If inflight is disabled, 441 * the buffer will not be shrinked. Note that snd_bwnd already 442 * has a fudge factor. Our test adds a little hysteresis. 
443 */ 444 if (tcp_do_autosndbuf && (so->so_snd.ssb_flags & SSB_AUTOSIZE)) { 445 const int asbinc = tcp_autosndbuf_inc; 446 const int hiwat = so->so_snd.ssb_hiwat; 447 const int lowat = so->so_snd.ssb_lowat; 448 u_long newsize; 449 450 if ((tp->snd_wnd / 4 * 5) >= hiwat && 451 so->so_snd.ssb_cc >= (hiwat / 8 * 7) && 452 hiwat < tp->snd_bwnd + hiwat / 10 && 453 hiwat + asbinc < tcp_autosndbuf_max && 454 hiwat < (TCP_MAXWIN << tp->snd_scale) && 455 sendwin >= (so->so_snd.ssb_cc - 456 (tp->snd_nxt - tp->snd_una))) { 457 newsize = ulmin(hiwat + asbinc, tcp_autosndbuf_max); 458 if (!ssb_reserve(&so->so_snd, newsize, so, NULL)) 459 atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); 460 #if 0 461 if (newsize >= (TCP_MAXWIN << tp->snd_scale)) 462 atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); 463 #endif 464 } else if ((long)tp->snd_bwnd < 465 (long)(hiwat * 3 / 4 - lowat - asbinc) && 466 hiwat > tp->t_maxseg * 2 + asbinc && 467 hiwat + asbinc >= tcp_autosndbuf_min && 468 tcp_do_autosndbuf == 1) { 469 newsize = ulmax(hiwat - asbinc, tp->t_maxseg * 2); 470 ssb_reserve(&so->so_snd, newsize, so, NULL); 471 } 472 } 473 474 /* 475 * Don't use TSO, if: 476 * - Congestion window needs validation 477 * - There are SACK blocks to report 478 * - RST or SYN flags is set 479 * - URG will be set 480 * 481 * XXX 482 * Checking for SYN|RST looks overkill, just to be safe than sorry 483 */ 484 use_tso = can_tso; 485 if (report_sack || idle_cwv || (flags & (TH_RST | TH_SYN))) 486 use_tso = FALSE; 487 if (use_tso) { 488 tcp_seq ugr_nxt = tp->snd_nxt; 489 490 if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) && 491 tp->snd_nxt == tp->snd_max) 492 --ugr_nxt; 493 494 if (SEQ_GT(tp->snd_up, ugr_nxt)) 495 use_tso = FALSE; 496 } 497 498 if (use_tso) { 499 /* 500 * Find out segment size and header length for TSO 501 */ 502 error = tcp_tso_getsize(tp, &segsz, &tso_hlen); 503 if (error) 504 use_tso = FALSE; 505 } 506 if (!use_tso) { 507 segsz = tp->t_maxseg; 508 tso_hlen = 0; /* not used */ 509 } 510 511 /* 512 * Truncate to the maximum segment length if not TSO, and ensure that 513 * FIN is removed if the length no longer contains the last data byte. 514 */ 515 if (len > segsz) { 516 if (!use_tso) { 517 len = segsz; 518 ++segcnt; 519 } else { 520 int nsegs; 521 522 if (__predict_false(tso_lenmax < segsz)) 523 tso_lenmax = segsz << 1; 524 525 /* 526 * Truncate TSO transfers to (IP_MAXPACKET - iphlen - 527 * thoff), and make sure that we send equal size 528 * transfers down the stack (rather than big-small- 529 * big-small-...). 530 */ 531 len = min(len, tso_lenmax); 532 nsegs = min(len, (IP_MAXPACKET - tso_hlen)) / segsz; 533 KKASSERT(nsegs > 0); 534 535 len = nsegs * segsz; 536 537 if (len <= segsz) { 538 use_tso = FALSE; 539 ++segcnt; 540 } else { 541 segcnt += nsegs; 542 } 543 } 544 sendalot = TRUE; 545 } else { 546 use_tso = FALSE; 547 if (len > 0) 548 ++segcnt; 549 } 550 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc)) 551 flags &= ~TH_FIN; 552 553 recvwin = ssb_space(&so->so_rcv); 554 555 /* 556 * Sender silly window avoidance. We transmit under the following 557 * conditions when len is non-zero: 558 * 559 * - We have a full segment 560 * - This is the last buffer in a write()/send() and we are 561 * either idle or running NODELAY 562 * - we've timed out (e.g. 
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
		flags &= ~TH_FIN;

	recvwin = ssb_space(&so->so_rcv);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 * - We have a full segment
	 * - This is the last buffer in a write()/send() and we are
	 *   either idle or running NODELAY
	 * - we've timed out (e.g. persist timer)
	 * - we have more than 1/2 the maximum send window's worth of
	 *   data (receiver may be limiting the window size)
	 * - we need to retransmit
	 */
	if (len) {
		if (len >= segsz)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    len + off >= so->so_snd.ssb_cc &&
		    !(tp->t_flags & TF_NOPUSH)) {
			goto send;
		}
		if (tp->t_flags & TF_FORCE)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (tp->t_flags & TF_XMITNOW)
			goto send;
	}

	/*
	 * Compare the available window to the amount of window
	 * known to the peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 50% of the maximum possible
	 * window, then we want to send a window update to the peer.
	 */
	if (recvwin > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);
		long hiwat;

		/*
		 * This ack case typically occurs when the user has drained
		 * the TCP socket buffer sufficiently to warrant an ack
		 * containing a 'pure window update'... that is, an ack that
		 * ONLY updates the tcp window.
		 *
		 * It is unclear why we would need to do a pure window update
		 * past 2 segments if we are going to do one at 1/2 the high
		 * water mark anyway, especially since under normal conditions
		 * the user program will drain the socket buffer quickly.
		 * The 2-segment pure window update will often add a large
		 * number of extra, unnecessary acks to the stream.
		 *
		 * avoid_pure_win_update now defaults to 1.
		 */
		if (avoid_pure_win_update == 0 ||
		    (tp->t_flags & TF_RXRESIZED)) {
			if (adv >= (long) (2 * segsz)) {
				goto send;
			}
		}
		hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
		if (hiwat > (long)so->so_rcv.ssb_hiwat)
			hiwat = (long)so->so_rcv.ssb_hiwat;
		if (adv >= hiwat / 2)
			goto send;
	}
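
	/*
	 * Illustrative (hypothetical numbers): with a 64KB receive
	 * buffer and the default avoid_pure_win_update = 1, a pure
	 * window update ACK is sent only once the reader has opened
	 * at least 32KB of new window; with the knob set to 0,
	 * draining just 2 * segsz (e.g. 2 * 1460 = 2920 bytes) is
	 * already enough to trigger one.
	 */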

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if ((flags & TH_FIN) &&
	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
		goto send;

	/*
	 * TCP window updates are not reliable; rather, a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_callout_active(tp, tp->tt_persist)
	 *	is true when we are in persist state.
	 * The TF_FORCE flag in tp->t_flags
	 *	is set when we are called to send a persist packet.
	 * tcp_callout_active(tp, tp->tt_rexmt)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If the send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 *
	 * If nothing happens soon, send when the timer expires:
	 * if the window is nonzero, transmit what we can, otherwise force
	 * out a byte.
	 *
	 * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED
	 * with data pending.  This situation can occur during a
	 * simultaneous connect.
	 */
	if (so->so_snd.ssb_cc > 0 &&
	    tp->t_state != TCPS_SYN_RECEIVED &&
	    !tcp_callout_active(tp, tp->tt_rexmt) &&
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
	tp->t_flags &= ~TF_XMITNOW;
	return (0);

send:
	if (need_sched && len > 0) {
		tcp_output_sched(tp);
		return 0;
	}

	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP is set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		hdrlen = sizeof(struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if (!(tp->t_flags & TF_NOOPT)) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			memcpy(opt + 2, &mss, sizeof mss);
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    (!(flags & TH_ACK) ||
			     (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}

			if ((tcp_do_sack && !(flags & TH_ACK)) ||
			    tp->t_flags & TF_SACK_PERMITTED) {
				uint32_t *lp = (uint32_t *)(opt + optlen);

				*lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
				optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
			}
		}
	}

	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYN's.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    !(flags & TH_RST) &&
	    (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}
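
	/*
	 * Resulting opt[] layout for a typical SYN, for orientation
	 * (sizes follow from the option constants used above):
	 *
	 *	MSS:	kind 2, len 4			 4 bytes
	 *	WSCALE:	NOP + kind 3, len 3		 4 bytes
	 *	SACKOK:	NOP, NOP + kind 4, len 2	 4 bytes
	 *	TSTAMP:	NOP, NOP + kind 8, len 10	12 bytes
	 *
	 * i.e. optlen = 24, keeping th_off 32-bit aligned.
	 */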

	/* Set receive buffer autosizing timestamp. */
	if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
		tp->rfbuf_ts = ticks;

	/*
	 * If this is a SACK connection and we have a block to report,
	 * fill in the SACK blocks in the TCP options.
	 */
	if (report_sack)
		tcp_sack_fill_report(tp, opt, &optlen);

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int i;
		u_char *bp;
		/*
		 * Initialize TCP-MD5 option (RFC2385)
		 */
		bp = (u_char *)opt + optlen;
		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		sigoff = optlen + 2;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		optlen += TCPOLEN_SIGNATURE;
		/*
		 * Terminate options list and maintain 32-bit alignment.
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */
	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
			ipoptlen = 0;
		}
	}

	if (use_tso) {
		/* TSO segment length must be multiple of segment size */
		KASSERT(len >= (2 * segsz) && (len % segsz == 0),
		    ("invalid TSO len %ld, segsz %u", len, segsz));
	} else {
		KASSERT(len <= segsz,
		    ("invalid len %ld, segsz %u", len, segsz));

		/*
		 * Adjust data length if insertion of options will bump
		 * the packet length beyond the t_maxopd length.  Clear
		 * FIN to prevent premature closure since there is still
		 * more data to send after this (now truncated) packet.
		 *
		 * If just the options do not fit we are in a no-win
		 * situation and we treat it as an unreachable host.
		 */
		if (len + optlen + ipoptlen > tp->t_maxopd) {
			if (tp->t_maxopd <= optlen + ipoptlen) {
				static time_t last_optlen_report;

				if (last_optlen_report != time_uptime) {
					last_optlen_report = time_uptime;
					kprintf("tcpcb %p: MSS (%d) too "
					    "small to hold options!\n",
					    tp, tp->t_maxopd);
				}
				error = EHOSTUNREACH;
				goto out;
			} else {
				flags &= ~TH_FIN;
				len = tp->t_maxopd - optlen - ipoptlen;
				sendalot = TRUE;
			}
		}
	}
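
	/*
	 * Illustrative: with t_maxopd = 1460, a 12 byte timestamp
	 * option and no IP options, the clipping above limits each
	 * packet to len = 1460 - 12 = 1448 bytes of data, and
	 * sendalot forces another pass for the remainder.
	 */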

#ifdef INET6
	KASSERT(max_linkhdr + hdrlen <= MCLBYTES, ("tcphdr too big"));
#else
	KASSERT(max_linkhdr + hdrlen <= MHLEN, ("tcphdr too big"));
#endif

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		if ((tp->t_flags & TF_FORCE) && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			if (tp->snd_nxt == tp->snd_una)
				tp->snd_max_rexmt = tp->snd_max;
			if (nsacked) {
				tcpstat.tcps_sndsackrtopack++;
				tcpstat.tcps_sndsackrtobyte += len;
			}
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;
		}
		if (idle_cwv) {
			idle_cwv = FALSE;
			tcp_idle_cwnd_validate(tp);
		}
		/* Update last send time after CWV */
		tp->snd_last = ticks;
#ifdef notyet
		if ((m = m_copypack(so->so_snd.ssb_mb, off, (int)len,
		    max_linkhdr + hdrlen)) == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		/*
		 * m_copypack left space for our hdr; use it.
		 */
		m->m_len += hdrlen;
		m->m_data -= hdrlen;
#else
#ifndef INET6
		m = m_gethdr(M_NOWAIT, MT_HEADER);
#else
		m = m_getl(hdrlen + max_linkhdr, M_NOWAIT, MT_HEADER,
			   M_PKTHDR, NULL);
#endif
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(so->so_snd.ssb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			m->m_next = m_copy(so->so_snd.ssb_mb, off, (int) len);
			if (m->m_next == NULL) {
				m_free(m);
				m = NULL;
				error = ENOBUFS;
				goto after_th;
			}
		}
#endif
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.ssb_cc)
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN | TH_FIN | TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, M_NOWAIT, MT_HEADER);
		if (m == NULL) {
			error = ENOBUFS;
			goto after_th;
		}
		if (isipv6 &&
		    (hdrlen + max_linkhdr > MHLEN) && hdrlen <= MHLEN)
			MH_ALIGN(m, hdrlen);
		else
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Prioritize SYN, SYN|ACK and pure ACK.
		 * Leave FIN and RST as they are.
		 */
		if (tcp_prio_synack && (flags & (TH_FIN | TH_RST)) == 0)
			m->m_flags |= M_PRIO;
	}
	m->m_pkthdr.rcvif = NULL;
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th, use_tso);
	} else {
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th, use_tso);
	}
after_th:
	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;

	if (th != NULL) {
		/*
		 * If we are doing retransmissions, then snd_nxt will
		 * not reflect the first unsent octet.  For ACK only
		 * packets, we do not want the sequence number of the
		 * retransmitted packet, we want the sequence number
		 * of the next unsent octet.  So, if there is no data
		 * (and no SYN or FIN), use snd_max instead of snd_nxt
		 * when filling in ti_seq.  But if we are in persist
		 * state, snd_max might reflect one byte beyond the
		 * right edge of the window, so use snd_nxt in that
		 * case, since we know we aren't doing a retransmission.
		 * (retransmit and persist are mutually exclusive...)
		 */
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_callout_active(tp, tp->tt_persist))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
		th->th_ack = htonl(tp->rcv_nxt);
		if (optlen) {
			bcopy(opt, th + 1, optlen);
			th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		}
		th->th_flags = flags;
	}

	/*
	 * Calculate the receive window.  Don't shrink the window, but
	 * avoid silly window syndrome by sending a 0 window if the
	 * actual window is less than one segment.
	 */
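	/*
	 * Illustrative (hypothetical numbers): with ssb_hiwat = 64KB
	 * and segsz = 1460, an almost-full receive buffer with only
	 * 1KB of space left (less than both hiwat / 4 = 16KB and one
	 * segment) is advertised as a 0 window below rather than
	 * inviting the peer to send tiny segments.
	 */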
	if (recvwin < (long)(so->so_rcv.ssb_hiwat / 4) &&
	    recvwin < (long)segsz)
		recvwin = 0;
	if (recvwin < (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt))
		recvwin = (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt);
	if (recvwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recvwin = (long)TCP_MAXWIN << tp->rcv_scale;

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (recvwin == 0)
		tp->t_flags |= TF_RXWIN0SENT;
	else
		tp->t_flags &= ~TF_RXWIN0SENT;

	if (th != NULL)
		th->th_win = htons((u_short) (recvwin>>tp->rcv_scale));

	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		KASSERT(!use_tso, ("URG with TSO"));
		if (th != NULL) {
			th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
			th->th_flags |= TH_URG;
		}
	} else {
		/*
		 * If there is no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
	}

	if (th != NULL) {
#ifdef TCP_SIGNATURE
		if (tp->t_flags & TF_SIGNATURE) {
			tcpsignature_compute(m, len, optlen,
			    (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
		}
#endif /* TCP_SIGNATURE */

		/*
		 * Put TCP length in extended header, and then
		 * checksum extended header and data.
		 */
		m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
		if (isipv6) {
			/*
			 * ip6_plen need not be filled in now; it will be
			 * filled in by ip6_output().
			 */
			th->th_sum = in6_cksum(m, IPPROTO_TCP,
			    sizeof(struct ip6_hdr),
			    sizeof(struct tcphdr) + optlen + len);
		} else {
			m->m_pkthdr.csum_thlen = sizeof(struct tcphdr) + optlen;
			if (use_tso) {
				m->m_pkthdr.csum_flags = CSUM_TSO;
				m->m_pkthdr.tso_segsz = segsz;
			} else {
				m->m_pkthdr.csum_flags = CSUM_TCP;
				m->m_pkthdr.csum_data =
				    offsetof(struct tcphdr, th_sum);
				if (len + optlen) {
					th->th_sum = in_addword(th->th_sum,
					    htons((u_short)(optlen + len)));
				}
			}

			/*
			 * IP version must be set here for ipv4/ipv6 checking
			 * later
			 */
			KASSERT(ip->ip_v == IPVERSION,
			    ("%s: IP version incorrect: %d",
			     __func__, ip->ip_v));
		}
	}

	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (!(tp->t_flags & TF_FORCE) ||
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN | TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;
			}
		}
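
		/*
		 * Background note: only one segment is timed at a
		 * time (a nonzero t_rtttime blocks a new sample), and,
		 * in line with Karn's rule, no RTT sample is taken
		 * over a retransmitted segment.
		 */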
1122 */ 1123 if (tp->t_rtttime == 0) { 1124 tp->t_rtttime = ticks; 1125 tp->t_rtseq = startseq; 1126 tcpstat.tcps_segstimed++; 1127 } 1128 } 1129 1130 /* 1131 * Set retransmit timer if not currently set, 1132 * and not doing a pure ack or a keep-alive probe. 1133 * Initial value for retransmit timer is smoothed 1134 * round-trip time + 2 * round-trip time variance. 1135 * Initialize shift counter which is used for backoff 1136 * of retransmit time. 1137 */ 1138 if (!tcp_callout_active(tp, tp->tt_rexmt) && 1139 tp->snd_nxt != tp->snd_una) { 1140 if (tcp_callout_active(tp, tp->tt_persist)) { 1141 tcp_callout_stop(tp, tp->tt_persist); 1142 tp->t_rxtshift = 0; 1143 } 1144 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur, 1145 tcp_timer_rexmt); 1146 } else if (len == 0 && so->so_snd.ssb_cc && 1147 tp->t_state > TCPS_SYN_RECEIVED && 1148 !tcp_callout_active(tp, tp->tt_rexmt) && 1149 !tcp_callout_active(tp, tp->tt_persist)) { 1150 /* 1151 * Avoid a situation where we do not set persist timer 1152 * after a zero window condition. For example: 1153 * 1) A -> B: packet with enough data to fill the window 1154 * 2) B -> A: ACK for #1 + new data (0 window 1155 * advertisement) 1156 * 3) A -> B: ACK for #2, 0 len packet 1157 * 1158 * In this case, A will not activate the persist timer, 1159 * because it chose to send a packet. Unless tcp_output 1160 * is called for some other reason (delayed ack timer, 1161 * another input packet from B, socket syscall), A will 1162 * not send zero window probes. 1163 * 1164 * So, if you send a 0-length packet, but there is data 1165 * in the socket buffer, and neither the rexmt or 1166 * persist timer is already set, then activate the 1167 * persist timer. 1168 */ 1169 tp->t_rxtshift = 0; 1170 tcp_setpersist(tp); 1171 } 1172 } else { 1173 /* 1174 * Persist case, update snd_max but since we are in 1175 * persist mode (no window) we do not update snd_nxt. 1176 */ 1177 int xlen = len; 1178 if (flags & TH_SYN) 1179 panic("tcp_output: persist timer to send SYN"); 1180 if (flags & TH_FIN) { 1181 ++xlen; 1182 tp->t_flags |= TF_SENTFIN; 1183 } 1184 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) 1185 tp->snd_max = tp->snd_nxt + xlen; 1186 } 1187 1188 if (th != NULL) { 1189 #ifdef TCPDEBUG 1190 /* Trace. */ 1191 if (so->so_options & SO_DEBUG) { 1192 tcp_trace(TA_OUTPUT, tp->t_state, tp, 1193 mtod(m, void *), th, 0); 1194 } 1195 #endif 1196 1197 /* 1198 * Fill in IP length and desired time to live and 1199 * send to IP level. There should be a better way 1200 * to handle ttl and tos; we could keep them in 1201 * the template, but need a way to checksum without them. 1202 */ 1203 /* 1204 * m->m_pkthdr.len should have been set before cksum 1205 * calcuration, because in6_cksum() need it. 1206 */ 1207 if (isipv6) { 1208 /* 1209 * we separately set hoplimit for every segment, 1210 * since the user might want to change the value 1211 * via setsockopt. Also, desired default hop 1212 * limit might be changed via Neighbor Discovery. 1213 */ 1214 ip6->ip6_hlim = in6_selecthlim(inp, 1215 (inp->in6p_route.ro_rt ? 

	if (th != NULL) {
#ifdef TCPDEBUG
		/* Trace. */
		if (so->so_options & SO_DEBUG) {
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif

		/*
		 * Fill in IP length and desired time to live and
		 * send to IP level.  There should be a better way
		 * to handle ttl and tos; we could keep them in
		 * the template, but need a way to checksum without them.
		 */
		/*
		 * m->m_pkthdr.len should have been set before the checksum
		 * calculation, because in6_cksum() needs it.
		 */
		if (isipv6) {
			/*
			 * we separately set hoplimit for every segment,
			 * since the user might want to change the value
			 * via setsockopt.  Also, desired default hop
			 * limit might be changed via Neighbor Discovery.
			 */
			ip6->ip6_hlim = in6_selecthlim(inp,
			    (inp->in6p_route.ro_rt ?
			     inp->in6p_route.ro_rt->rt_ifp : NULL));

			/* TODO: IPv6 IP6TOS_ECT bit on */
			error = ip6_output(m, inp->in6p_outputopts,
			    &inp->in6p_route, (so->so_options & SO_DONTROUTE),
			    NULL, NULL, inp);
		} else {
			struct rtentry *rt;

			KASSERT(!INP_CHECK_SOCKAF(so, AF_INET6), ("inet6 pcb"));

			ip->ip_len = m->m_pkthdr.len;
			ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */
			ip->ip_tos = inp->inp_ip_tos;	/* XXX */
			/*
			 * See if we should do MTU discovery.
			 * We do it only if the following are true:
			 *	1) we have a valid route to the destination
			 *	2) the MTU is not locked (if it is,
			 *	   then discovery has been disabled)
			 */
			if (path_mtu_discovery &&
			    (rt = inp->inp_route.ro_rt) &&
			    (rt->rt_flags & RTF_UP) &&
			    !(rt->rt_rmx.rmx_locks & RTV_MTU))
				ip->ip_off |= IP_DF;

			KASSERT(inp->inp_flags & INP_HASH,
			    ("inpcb has no hash"));
			m_sethash(m, inp->inp_hashval);
			error = ip_output(m, inp->inp_options, &inp->inp_route,
			    (so->so_options & SO_DONTROUTE) |
			    IP_DEBUGROUTE, NULL, inp);
		}
	} else {
		KASSERT(error != 0, ("no error, but th not set"));
	}
	if (error) {
		tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 */
		if (!(tp->t_flags & TF_FORCE) ||
		    !tcp_callout_active(tp, tp->tt_persist)) {
			/*
			 * No need to check for TH_FIN here because
			 * the TF_SENTFIN flag handles that case.
			 */
			if (!(flags & TH_SYN))
				tp->snd_nxt -= len;
		}

out:
		if (error == ENOBUFS) {
			KASSERT((len == 0 && (flags & (TH_SYN | TH_FIN)) == 0) ||
			    tcp_callout_active(tp, tp->tt_rexmt) ||
			    tcp_callout_active(tp, tp->tt_persist),
			    ("neither rexmt nor persist timer is set"));
			return (0);
		}
		if (error == EMSGSIZE) {
			/*
			 * ip_output() will have already fixed the route
			 * for us.  tcp_mtudisc() will, as its last action,
			 * initiate retransmission, so it is important to
			 * not do so here.
			 */
			tcp_mtudisc(inp, 0);
			return 0;
		}
		if ((error == EHOSTUNREACH || error == ENETDOWN) &&
		    TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			return (0);
		}
		return (error);
	}
	tcpstat.tcps_sndtotal++;

	/*
	 * Data sent (as far as we can tell).
	 *
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 *
	 * Any pending ACK has now been sent.
	 */
	if (recvwin > 0 && SEQ_GT(tp->rcv_nxt + recvwin, tp->rcv_adv)) {
		tp->rcv_adv = tp->rcv_nxt + recvwin;
		tp->t_flags &= ~TF_RXRESIZED;
	}
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
	if (tcp_delack_enabled)
		tcp_callout_stop(tp, tp->tt_delack);
	if (sendalot) {
		if (tcp_fairsend > 0 && (tp->t_flags & TF_FAIRSEND) &&
		    segcnt >= tcp_fairsend)
			need_sched = TRUE;
		goto again;
	}
	return (0);
}
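
/*
 * For orientation: tcp_setpersist() below arms the persist timer at
 * roughly (srtt / 4 + rttvar) / 2 ticks, scaled by
 * tcp_backoff[t_rxtshift] and clamped to [TCPTV_PERSMIN, TCPTV_PERSMAX];
 * since each call bumps t_rxtshift (up to TCP_MAXRXTSHIFT), successive
 * zero window probes back off exponentially until the clamp is hit.
 */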
1332 "SYN_SENT" : "SYN_RECEIVED"); 1333 } 1334 1335 if (tcp_callout_active(tp, tp->tt_rexmt)) 1336 panic("tcp_setpersist: retransmit pending"); 1337 /* 1338 * Start/restart persistance timer. 1339 */ 1340 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], TCPTV_PERSMIN, 1341 TCPTV_PERSMAX); 1342 tcp_callout_reset(tp, tp->tt_persist, tt, tcp_timer_persist); 1343 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 1344 tp->t_rxtshift++; 1345 } 1346 1347 static void 1348 tcp_idle_cwnd_validate(struct tcpcb *tp) 1349 { 1350 u_long initial_cwnd = tcp_initial_window(tp); 1351 u_long min_cwnd; 1352 1353 tcpstat.tcps_sndidle++; 1354 1355 /* According to RFC5681: RW=min(IW,cwnd) */ 1356 min_cwnd = min(tp->snd_cwnd, initial_cwnd); 1357 1358 if (tcp_idle_cwv) { 1359 u_long idle_time, decay_cwnd; 1360 1361 /* 1362 * RFC2861, but only after idle period. 1363 */ 1364 1365 /* 1366 * Before the congestion window is reduced, ssthresh 1367 * is set to the maximum of its current value and 3/4 1368 * cwnd. If the sender then has more data to send 1369 * than the decayed cwnd allows, the TCP will slow- 1370 * start (perform exponential increase) at least 1371 * half-way back up to the old value of cwnd. 1372 */ 1373 tp->snd_ssthresh = max(tp->snd_ssthresh, 1374 (3 * tp->snd_cwnd) / 4); 1375 1376 /* 1377 * Decay the congestion window by half for every RTT 1378 * that the flow remains inactive. 1379 * 1380 * The difference between our implementation and 1381 * RFC2861 is that we don't allow cwnd to go below 1382 * the value allowed by RFC5681 (min_cwnd). 1383 */ 1384 idle_time = ticks - tp->snd_last; 1385 decay_cwnd = tp->snd_cwnd; 1386 while (idle_time >= tp->t_rxtcur && 1387 decay_cwnd > min_cwnd) { 1388 decay_cwnd >>= 1; 1389 idle_time -= tp->t_rxtcur; 1390 } 1391 tp->snd_cwnd = max(decay_cwnd, min_cwnd); 1392 } else { 1393 /* 1394 * Slow-start from scratch to re-determine the send 1395 * congestion window. 
1396 */ 1397 tp->snd_cwnd = min_cwnd; 1398 } 1399 1400 /* Restart ABC counting during congestion avoidance */ 1401 tp->snd_wacked = 0; 1402 } 1403 1404 static int 1405 tcp_tso_getsize(struct tcpcb *tp, u_int *segsz, u_int *hlen0) 1406 { 1407 struct inpcb * const inp = tp->t_inpcb; 1408 #ifdef INET6 1409 const boolean_t isipv6 = INP_ISIPV6(inp); 1410 #else 1411 const boolean_t isipv6 = FALSE; 1412 #endif 1413 unsigned int ipoptlen, optlen; 1414 u_int hlen; 1415 1416 hlen = sizeof(struct ip) + sizeof(struct tcphdr); 1417 1418 if (isipv6) { 1419 ipoptlen = ip6_optlen(inp); 1420 } else { 1421 if (inp->inp_options) { 1422 ipoptlen = inp->inp_options->m_len - 1423 offsetof(struct ipoption, ipopt_list); 1424 } else { 1425 ipoptlen = 0; 1426 } 1427 } 1428 hlen += ipoptlen; 1429 1430 optlen = 0; 1431 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && 1432 (tp->t_flags & TF_RCVD_TSTMP)) 1433 optlen += TCPOLEN_TSTAMP_APPA; 1434 hlen += optlen; 1435 1436 if (tp->t_maxopd <= optlen + ipoptlen) 1437 return EHOSTUNREACH; 1438 1439 *segsz = tp->t_maxopd - optlen - ipoptlen; 1440 *hlen0 = hlen; 1441 return 0; 1442 } 1443 1444 static void 1445 tcp_output_sched_handler(netmsg_t nmsg) 1446 { 1447 struct tcpcb *tp = nmsg->lmsg.u.ms_resultp; 1448 1449 /* Reply ASAP */ 1450 crit_enter(); 1451 lwkt_replymsg(&nmsg->lmsg, 0); 1452 crit_exit(); 1453 1454 tcp_output_fair(tp); 1455 } 1456 1457 void 1458 tcp_output_init(struct tcpcb *tp) 1459 { 1460 netmsg_init(tp->tt_sndmore, NULL, &netisr_adone_rport, MSGF_DROPABLE, 1461 tcp_output_sched_handler); 1462 tp->tt_sndmore->lmsg.u.ms_resultp = tp; 1463 } 1464 1465 void 1466 tcp_output_cancel(struct tcpcb *tp) 1467 { 1468 /* 1469 * This message is still pending to be processed; 1470 * drop it. Optimized. 1471 */ 1472 crit_enter(); 1473 if ((tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE) == 0) { 1474 lwkt_dropmsg(&tp->tt_sndmore->lmsg); 1475 } 1476 crit_exit(); 1477 } 1478 1479 boolean_t 1480 tcp_output_pending(struct tcpcb *tp) 1481 { 1482 if ((tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE) == 0) 1483 return TRUE; 1484 else 1485 return FALSE; 1486 } 1487 1488 static void 1489 tcp_output_sched(struct tcpcb *tp) 1490 { 1491 crit_enter(); 1492 if (tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE) 1493 lwkt_sendmsg(netisr_cpuport(mycpuid), &tp->tt_sndmore->lmsg); 1494 crit_exit(); 1495 } 1496 1497 /* 1498 * Fairsend 1499 * 1500 * Yield to other senders or receivers on the same netisr if the current 1501 * TCP stream has sent tcp_fairsend segments and is going to burst more 1502 * segments. Bursting large amount of segements in a single TCP stream 1503 * could delay other senders' segments and receivers' ACKs quite a lot, 1504 * if others segments and ACKs are queued on to the same hardware transmit 1505 * queue; thus cause unfairness between senders and suppress receiving 1506 * performance. 1507 * 1508 * Fairsend should be performed at the places that do not affect segment 1509 * sending during congestion control, e.g. 1510 * - User requested output 1511 * - ACK input triggered output 1512 * 1513 * NOTE: 1514 * For devices that are TSO capable, their TSO aggregation size limit could 1515 * affect fairsend. 1516 */ 1517 int 1518 tcp_output_fair(struct tcpcb *tp) 1519 { 1520 int ret; 1521 1522 tp->t_flags |= TF_FAIRSEND; 1523 ret = tcp_output(tp); 1524 tp->t_flags &= ~TF_FAIRSEND; 1525 1526 return ret; 1527 } 1528