/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

/*
 * Implemented:
 *
 * RFC 2018
 * RFC 2883
 * RFC 3517
 * RFC 6675
 */

struct sackblock {
	tcp_seq		sblk_start;
	tcp_seq		sblk_end;
	TAILQ_ENTRY(sackblock) sblk_list;
};

#define MAXSAVEDBLOCKS	8		/* per connection limit */

static int insert_block(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
	struct scoreboard *scb = &tp->scb;

	scb->nblocks = 0;
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
 */
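/*
 * For example, with blocks [10, 20) and [30, 40) in the scoreboard:
 * a lookup of 15 returns TRUE with *sb pointing at [10, 20); a lookup
 * of 25 returns FALSE with *sb pointing at [10, 20); a lookup of 5
 * returns FALSE with *sb set to NULL.
 */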
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
	struct sackblock *hint = scb->lastfound;
	struct sackblock *cur, *last, *prev;

	if (TAILQ_EMPTY(&scb->sackblocks)) {
		*sb = NULL;
		return FALSE;
	}

	if (hint == NULL) {
		/* No hint. Search from start to end. */
		cur = TAILQ_FIRST(&scb->sackblocks);
		last = NULL;
		prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	} else {
		if (SEQ_GEQ(seq, hint->sblk_start)) {
			/* Search from hint to end of list. */
			cur = hint;
			last = NULL;
			prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
		} else {
			/* Search from front of list to hint. */
			cur = TAILQ_FIRST(&scb->sackblocks);
			last = hint;
			prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
		}
	}

	do {
		if (SEQ_GT(cur->sblk_end, seq)) {
			if (SEQ_GEQ(seq, cur->sblk_start)) {
				*sb = scb->lastfound = cur;
				return TRUE;
			} else {
				*sb = scb->lastfound =
				    TAILQ_PREV(cur, sackblock_list, sblk_list);
				return FALSE;
			}
		}
		cur = TAILQ_NEXT(cur, sblk_list);
	} while (cur != last);

	*sb = scb->lastfound = prev;
	return FALSE;
}

/*
 * Allocate a SACK block.
 */
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
	struct sackblock *sb;

	if (scb->freecache != NULL) {
		sb = scb->freecache;
		scb->freecache = NULL;
		tcpstat.tcps_sacksbfast++;
	} else {
		sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
		if (sb == NULL) {
			tcpstat.tcps_sacksbfailed++;
			return NULL;
		}
	}
	sb->sblk_start = raw_sb->rblk_start;
	sb->sblk_end = raw_sb->rblk_end;
	return sb;
}

static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
	if (scb->nblocks == MAXSAVEDBLOCKS) {
		/*
		 * Should try to kick out older blocks XXX JH
		 * May be able to coalesce with existing block.
		 * Or, go other way and free all blocks if we hit
		 * this limit.
		 */
		tcpstat.tcps_sacksboverflow++;
		return NULL;
	}
	return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
	if (scb->freecache == NULL) {
		/* YYY Maybe use the latest freed block? */
		scb->freecache = s;
		return;
	}
	kfree(s, M_SACKBLOCK);
}

/*
 * Free up SACK blocks for data that's been acked.
 */
static void
tcp_sack_ack_blocks(struct tcpcb *tp, tcp_seq th_ack)
{
	struct scoreboard *scb = &tp->scb;
	struct sackblock *sb, *nb;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
		nb = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks >= 0,
		    ("SACK block count underflow: %d < 0", scb->nblocks));
		sb = nb;
	}
	if (sb && SEQ_GEQ(th_ack, sb->sblk_start)) {
		/* Other side reneged? XXX */
		tcpstat.tcps_sackrenege++;
		tcp_sack_discard(tp);
	}
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
static void
tcp_sack_cleanup(struct scoreboard *scb)
{
	struct sackblock *sb, *nb;

	TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
		free_sackblock(scb, sb);
		--scb->nblocks;
	}
	KASSERT(scb->nblocks == 0,
	    ("SACK block count %d not zero", scb->nblocks));
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Discard SACK scoreboard, HighRxt, RescueRxt and LostSeq.
 */
void
tcp_sack_discard(struct tcpcb *tp)
{
	tcp_sack_cleanup(&tp->scb);
	tp->rexmt_high = tp->snd_una;
	tp->sack_flags &= ~TSACK_F_SACKRESCUED;
	tp->scb.lostseq = tp->snd_una;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Delete the one slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
	tcp_sack_cleanup(scb);
	if (scb->freecache != NULL) {
		kfree(scb->freecache, M_SACKBLOCK);
		scb->freecache = NULL;
	}
}

/*
 * Clean up the reported SACK block information.
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Returns	0 if not D-SACK block,
 *		1 if D-SACK,
 *		2 if duplicate of out-of-order D-SACK block.
 */
int
tcp_sack_ndsack_blocks(const struct raw_sackblock *blocks, const int numblocks,
    tcp_seq snd_una)
{
	if (numblocks == 0)
		return 0;

	if (SEQ_LT(blocks[0].rblk_start, snd_una))
		return 1;

	/* block 0 inside block 1 */
	if (numblocks > 1 &&
	    SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
	    SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
		return 2;

	return 0;
}

/*
 * Update scoreboard on new incoming ACK.
 */
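/*
 * A leading D-SACK block (RFC 2883) only reports data that was received
 * more than once, so it is skipped when updating the scoreboard.  Blocks
 * that start at or below snd_una are ignored (stale or reordered ACKs),
 * and a block ending beyond snd_max is treated as malformed and
 * terminates the scan.
 */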
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
	const int numblocks = to->to_nsackblocks;
	struct raw_sackblock *blocks = to->to_sackblocks;
	struct scoreboard *scb = &tp->scb;
	int startblock, i;

	if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
		startblock = 1;
	else
		startblock = 0;

	to->to_flags |= TOF_SACK_REDUNDANT;
	for (i = startblock; i < numblocks; i++) {
		struct raw_sackblock *newsackblock = &blocks[i];
		boolean_t update;
		int error;

		/* Guard against ACK reordering */
		if (SEQ_LEQ(newsackblock->rblk_start, tp->snd_una))
			continue;

		/* Don't accept bad SACK blocks */
		if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
			tcpstat.tcps_rcvbadsackopt++;
			break;		/* skip all other blocks */
		}
		tcpstat.tcps_sacksbupdate++;

		error = insert_block(scb, newsackblock, &update);
		if (update)
			to->to_flags &= ~TOF_SACK_REDUNDANT;
		if (error)
			break;
	}
}

void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
	struct scoreboard *scb = &tp->scb;
	int rexmt_high_update = 0;

	tcp_sack_ack_blocks(tp, tp->snd_una);
	tcp_sack_add_blocks(tp, to);
	tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
	    tp->t_rxtthresh);
	if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
		tp->rexmt_high = tp->snd_una;
		rexmt_high_update = 1;
	}
	if (tp->sack_flags & TSACK_F_SACKRESCUED) {
		if (SEQ_LEQ(tp->rexmt_rescue, tp->snd_una)) {
			tp->sack_flags &= ~TSACK_F_SACKRESCUED;
		} else if (tcp_aggressive_rescuesack && rexmt_high_update &&
		    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
			/* Drag RescueRxt along with HighRxt */
			tp->rexmt_rescue = tp->rexmt_high;
		}
	}
}

/*
 * Insert SACK block into sender's scoreboard.
 */
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
	struct sackblock *sb, *workingblock;
	boolean_t overlap_front;

	*update = TRUE;
	if (TAILQ_EMPTY(&scb->sackblocks)) {
		struct sackblock *newblock;

		KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

		newblock = alloc_sackblock(scb, raw_sb);
		if (newblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
		scb->nblocks = 1;
		return 0;
	}

	KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
	KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
	    ("too many SACK blocks %d", scb->nblocks));

	overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

	if (sb == NULL) {
		workingblock = alloc_sackblock_limit(scb, raw_sb);
		if (workingblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
		++scb->nblocks;
	} else {
		if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
			tcpstat.tcps_sacksbreused++;

			/* Extend old block */
			workingblock = sb;
			if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
				sb->sblk_end = raw_sb->rblk_end;
			} else {
				/* Exact match, nothing to consolidate */
				*update = FALSE;
				return 0;
			}
		} else {
			workingblock = alloc_sackblock_limit(scb, raw_sb);
			if (workingblock == NULL)
				return ENOMEM;
			TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
			    sblk_list);
			++scb->nblocks;
		}
	}

	/* Consolidate right-hand side. */
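	/*
	 * For example, if the working block now covers [100, 400) and is
	 * followed by [150, 200) and [300, 500), the first block is removed
	 * as fully overlapped and the partial overlap with the second
	 * extends the working block to [100, 500).
	 */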
	sb = TAILQ_NEXT(workingblock, sblk_list);
	while (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
		struct sackblock *nextblock;

		nextblock = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		/* Remove completely overlapped block */
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed overlapped block: %d blocks left", scb->nblocks));
		sb = nextblock;
	}
	if (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
		/* Extend new block to cover partially overlapped old block. */
		workingblock->sblk_end = sb->sblk_end;
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed partial right: %d blocks left", scb->nblocks));
	}
	return 0;
}

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
	const struct sackblock *sb;

	kprintf("%d blocks:", scb->nblocks);
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
 */
void
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
	struct sackblock *sb;
	int nsackblocks = 0;
	int bytes_sacked = 0;
	int rxtthresh_bytes;

	if (tcp_do_rfc6675)
		rxtthresh_bytes = (rxtthresh - 1) * maxseg;
	else
		rxtthresh_bytes = rxtthresh * maxseg;

	sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	while (sb != NULL) {
		++nsackblocks;
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (nsackblocks == rxtthresh ||
		    bytes_sacked >= rxtthresh_bytes) {
			scb->lostseq = sb->sblk_start;
			return;
		}
		sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
	}
	scb->lostseq = snd_una;
}

/*
 * Return whether the given sequence number is considered lost.
 */
boolean_t
tcp_sack_islost(const struct scoreboard *scb, tcp_seq seqnum)
{
	return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed. Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(const struct scoreboard *scb, u_int amount)
{
	const struct sackblock *sb;
	int bytes_sacked = 0;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (bytes_sacked >= amount)
			return TRUE;
	}
	return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(const struct scoreboard *scb, tcp_seq seq)
{
	const struct sackblock *sb;
	int bytes_sacked = 0;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_GT(seq, sb->sblk_start)) {
		bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
		sb = TAILQ_NEXT(sb, sblk_list);
	}
	return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
 */
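/*
 * The estimate counts un-SACKed bytes in [lostseq, snd_max) (original
 * transmissions still presumed in flight) plus un-SACKed bytes in
 * [snd_una, rexmt_high) (retransmissions presumed in flight).
 */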
uint32_t
tcp_sack_compute_pipe(const struct tcpcb *tp)
{
	const struct scoreboard *scb = &tp->scb;
	const struct sackblock *sb;
	int nlost, nretransmitted;
	tcp_seq end;

	nlost = tp->snd_max - scb->lostseq;
	nretransmitted = tp->rexmt_high - tp->snd_una;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
			end = seq_min(sb->sblk_end, tp->rexmt_high);
			nretransmitted -= end - sb->sblk_start;
		}
		if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
			nlost -= sb->sblk_end - sb->sblk_start;
	}

	return (nlost + nretransmitted);
}

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
 */
boolean_t
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
	struct scoreboard *scb = &tp->scb;
	struct socket *so = tp->t_inpcb->inp_socket;
	struct sackblock *sb;
	const struct sackblock *lastblock =
	    TAILQ_LAST(&scb->sackblocks, sackblock_list);
	tcp_seq torexmt;
	long len, off, sendwin;

	/* skip SACKed data */
	tcp_sack_skip_sacked(scb, &tp->rexmt_high);

	/* Look for lost data. */
	torexmt = tp->rexmt_high;
	*rescue = FALSE;
	if (lastblock != NULL) {
		if (SEQ_LT(torexmt, lastblock->sblk_end) &&
		    tcp_sack_islost(scb, torexmt)) {
sendunsacked:
			*nextrexmt = torexmt;
			/*
			 * If the next SACK block starts within one MSS,
			 * send only up to its left edge.
			 */
			if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
				*plen = sb->sblk_start - torexmt;
			else
				*plen = tp->t_maxseg;
			return TRUE;
		}
	}

	/* See if unsent data available within send window. */
	off = tp->snd_max - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_bwnd);
	len = (long) ulmin(so->so_snd.ssb_cc, sendwin) - off;
	if (len > 0) {
		*nextrexmt = tp->snd_max;	/* Send new data. */
		*plen = tp->t_maxseg;
		return TRUE;
	}

	/* We're less certain this data has been lost. */
	if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
		goto sendunsacked;

	/* Rescue retransmission */
	if (tcp_do_rescuesack || tcp_do_rfc6675) {
		tcpstat.tcps_sackrescue_try++;
		if (tp->sack_flags & TSACK_F_SACKRESCUED) {
			if (!tcp_aggressive_rescuesack)
				return FALSE;

			/*
			 * Aggressive variant of the rescue retransmission.
			 *
			 * The idea of the rescue retransmission is to sustain
			 * the ACK clock and thus avoid a retransmission
			 * timeout.
			 *
			 * In some situations, the conservative approach
			 * suggested in the draft
			 * http://tools.ietf.org/html/
			 * draft-nishida-tcpm-rescue-retransmission-00
			 * cannot sustain the ACK clock, since it allows only
			 * one rescue retransmission before a cumulative ACK
			 * covers the segment sent by the rescue
			 * retransmission.
			 *
			 * We try to locate the next unSACKed segment which
			 * follows the previously sent rescue segment.  If
			 * there is no such segment, we loop back to the first
			 * unacknowledged segment.
			 */

			/*
			 * Skip SACKed data, but here we follow
			 * the last transmitted rescue segment.
			 */
			torexmt = tp->rexmt_rescue;
			tcp_sack_skip_sacked(scb, &torexmt);
		}
		if (torexmt == tp->snd_max) {
			/* Nothing left to retransmit; restart */
			torexmt = tp->snd_una;
		}
		*rescue = TRUE;
		goto sendunsacked;
	} else if (tcp_do_smartsack && lastblock == NULL) {
		tcpstat.tcps_sackrescue_try++;
		*rescue = TRUE;
		goto sendunsacked;
	}

	return FALSE;
}

/*
 * Advance "*prexmt" to the next sequence number at or above it that has
 * not been SACKed.
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
	struct sackblock *sb;

	/* skip SACKed data */
	if (sack_block_lookup(scb, *prexmt, &sb))
		*prexmt = sb->sblk_end;
}

/*
 * Length of the first run of unSACKed data.
 */
uint32_t
tcp_sack_first_unsacked_len(const struct tcpcb *tp)
{
	const struct sackblock *sb;

	sb = TAILQ_FIRST(&tp->scb.sackblocks);
	if (sb == NULL)
		return tp->t_maxseg;

	KASSERT(SEQ_LT(tp->snd_una, sb->sblk_start),
	    ("invalid sb start %u, snd_una %u",
	     sb->sblk_start, tp->snd_una));
	return (sb->sblk_start - tp->snd_una);
}

#ifdef later
void
tcp_sack_save_scoreboard(struct tcpcb *tp)
{
	struct scoreboard *scb = &tp->scb;

	scb->sackblocks_prev = scb->sackblocks;
	TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct tcpcb *tp, tcp_seq snd_una,
    u_int maxseg)
{
	struct scoreboard *scb = &tp->scb;
	struct sackblock *sb;

	scb->sackblocks = scb->sackblocks_prev;
	scb->nblocks = 0;
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		++scb->nblocks;
	tcp_sack_ack_blocks(tp, snd_una);
	scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
	int i;
	static int ndumped;

	/* only need a couple of these to debug most problems */
	if (++ndumped > 900)
		return;

	kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
	for (i = 0; i < tp->nsackhistory; ++i)
		kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
		    tp->sackhistory[i].rblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
}
#endif

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
	int i, nblocks, openslot;

	tcp_sack_dump_history("before tcp_sack_ack_history", tp);
	nblocks = tp->nsackhistory;
	for (i = openslot = 0; i < nblocks; ++i) {
		if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
			--tp->nsackhistory;
			continue;
		}
		if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
			tp->sackhistory[i].rblk_start = tp->rcv_nxt;
		if (i == openslot)
			++openslot;
		else
			tp->sackhistory[openslot++] = tp->sackhistory[i];
	}
	tcp_sack_dump_history("after tcp_sack_ack_history", tp);
	KASSERT(openslot == tp->nsackhistory,
	    ("tcp_sack_ack_history miscounted: %d != %d",
	     openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
 */
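/*
 * For example, merging a new block [15, 30) with a stored block [10, 20)
 * falls under case 4 below: the new block is extended to [10, 30), the
 * old block is dropped, and the result is placed at the front of the
 * history.
 */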
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
	int i, cindex;

	tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
	/*
	 * Six cases:
	 *	0) no overlap
	 *	1) newblock == oldblock
	 *	2) oldblock contains newblock
	 *	3) newblock contains oldblock
	 *	4) tail of oldblock overlaps or abuts start of newblock
	 *	5) tail of newblock overlaps or abuts head of oldblock
	 */
	for (i = cindex = 0; i < tp->nsackhistory; ++i) {
		struct raw_sackblock *oldblock = &tp->sackhistory[i];
		tcp_seq old_start = oldblock->rblk_start;
		tcp_seq old_end = oldblock->rblk_end;

		if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
			/* Case 0: no overlap. Copy old block. */
			copy[cindex++] = *oldblock;
			continue;
		}

		if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
			/* Cases 1 & 2. Move block to front of history. */
			int j;

			start = old_start;
			end = old_end;
			/* no need to check rest of blocks */
			for (j = i + 1; j < tp->nsackhistory; ++j)
				copy[cindex++] = tp->sackhistory[j];
			break;
		}

		if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
			/* Case 4: extend start of new block. */
			start = old_start;
		} else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
			/* Case 5: extend end of new block */
			end = old_end;
		} else {
			/* Case 3. Delete old block by not copying it. */
			KASSERT(SEQ_LEQ(start, old_start) &&
			    SEQ_GEQ(end, old_end),
			    ("bad logic: old [%u, %u), new [%u, %u)",
			     old_start, old_end, start, end));
		}
	}

	/* insert new block */
	tp->sackhistory[0].rblk_start = start;
	tp->sackhistory[0].rblk_end = end;
	cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
	for (i = 0; i < cindex; ++i)
		tp->sackhistory[i + 1] = copy[i];
	tp->nsackhistory = cindex + 1;
	tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}

/*
 * Fill in SACK report to return to data sender.
 */
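/*
 * The option begins with a 32-bit aligned header word (TCPOPT_SACK_ALIGNED
 * plus the option length), followed by each reported block as a pair of
 * 32-bit sequence numbers in network byte order.  The report block is
 * emitted first, then the enclosing block if any; the remaining space is
 * filled from the reassembly queue or the SACK history.  The length field
 * is patched in once the number of blocks is known.
 */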
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
	u_int optlen = *plen;
	uint32_t *lp = (uint32_t *)(opt + optlen);
	uint32_t *olp;
	tcp_seq hstart = tp->rcv_nxt, hend;
	int nblocks;

	KASSERT(TCP_MAXOLEN - optlen >=
	    TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
	    ("no room for SACK header and one block: optlen %d", optlen));

	if (tp->sack_flags & TSACK_F_DUPSEG)
		tcpstat.tcps_snddsackopt++;
	else
		tcpstat.tcps_sndsackopt++;

	olp = lp++;
	optlen += TCPOLEN_SACK_ALIGNED;

	tcp_sack_ack_history(tp);
	if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
		*lp++ = htonl(tp->reportblk.rblk_start);
		*lp++ = htonl(tp->reportblk.rblk_end);
		optlen += TCPOLEN_SACK_BLOCK;
		hstart = tp->reportblk.rblk_start;
		hend = tp->reportblk.rblk_end;
		if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
			KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
			    ("no room for enclosing SACK block: optlen %d",
			     optlen));
			*lp++ = htonl(tp->encloseblk.rblk_start);
			*lp++ = htonl(tp->encloseblk.rblk_end);
			optlen += TCPOLEN_SACK_BLOCK;
			hstart = tp->encloseblk.rblk_start;
			hend = tp->encloseblk.rblk_end;
		}
		if (SEQ_GT(hstart, tp->rcv_nxt))
			tcp_sack_update_reported_history(tp, hstart, hend);
	}
	if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
		/* Fill in from left! Walk re-assembly queue. */
		struct tseg_qent *q;

		q = TAILQ_FIRST(&tp->t_segq);
		while (q != NULL &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			*lp++ = htonl(q->tqe_th->th_seq);
			*lp++ = htonl(TCP_SACK_BLKEND(
			    q->tqe_th->th_seq + q->tqe_len,
			    q->tqe_th->th_flags));
			optlen += TCPOLEN_SACK_BLOCK;
			q = TAILQ_NEXT(q, tqe_q);
		}
	} else {
		int n = 0;

		/* Fill in SACK blocks from right side. */
		while (n < tp->nsackhistory &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			if (tp->sackhistory[n].rblk_start != hstart) {
				*lp++ = htonl(tp->sackhistory[n].rblk_start);
				*lp++ = htonl(tp->sackhistory[n].rblk_end);
				optlen += TCPOLEN_SACK_BLOCK;
			}
			++n;
		}
	}
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	nblocks = (lp - olp - 1) / 2;
	*olp = htonl(TCPOPT_SACK_ALIGNED |
	    (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
	*plen = optlen;
}