/*-
 * Copyright (c) 2016-2018
 * Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Author: Randall Stewart <rrs@netflix.com>
 * This work is based on the ACM Queue paper
 * "BBR - Congestion Based Congestion Control"
 * and also numerous discussions with Neal, Yuchung and Van.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/qmath.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/tree.h>
#ifdef NETFLIX_STATS
#include <sys/stats.h>	/* Must come after qmath.h and tree.h */
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/tim_filter.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <sys/kern_prefetch.h>

#include <net/route.h>
#include <net/vnet.h>
#include <net/ethernet.h>
#include <net/bpf.h>

#define TCPSTATES	/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#define TCPOUTFLAGS
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_log_buf.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif	/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_fastopen.h>

#include <netipsec/ipsec_support.h>
#include <net/if.h>
#include <net/if_var.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif	/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "rack_bbr_common.h"

/*
 * Common TCP Functions - These are shared by both
 * rack and BBR.
 */

#ifdef KERN_TLS
uint32_t
ctf_get_opt_tls_size(struct socket *so, uint32_t rwnd)
{
	struct ktls_session *tls;
	uint32_t len;

again:
	tls = so->so_snd.sb_tls_info;
	len = tls->params.max_frame_len;	/* max tls payload */
	len += tls->params.tls_hlen;		/* tls header len  */
	len += tls->params.tls_tlen;		/* tls trailer len */
	if ((len * 4) > rwnd) {
		/*
		 * Stroke a "this will suck" counter here, and what
		 * else should we do, Drew? From the TCP perspective
		 * I am not sure what should be done...
		 */
		if (tls->params.max_frame_len > 4096) {
			tls->params.max_frame_len -= 4096;
			if (tls->params.max_frame_len < 4096)
				tls->params.max_frame_len = 4096;
			goto again;
		}
	}
	return (len);
}
#endif
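
/*
 * Worked example for ctf_get_opt_tls_size() above (the numbers are
 * illustrative, not mandated anywhere): with rwnd = 32768,
 * max_frame_len = 16384 and tls_hlen + tls_tlen = 21, the record
 * length is 16405 and 16405 * 4 > 32768, so max_frame_len is walked
 * down in 4096-byte steps (12288, 8192, 4096) until 4117 * 4 <= 32768,
 * and 4117 is returned. Note that once max_frame_len reaches the 4096
 * floor, the loop exits even if four records would still exceed rwnd.
 */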

/*
 * The function ctf_process_inbound_raw() is used by
 * transport developers to do the steps needed to
 * support mbuf queuing, i.e. the flags in
 * inp->inp_flags2:
 *
 * - INP_SUPPORTS_MBUFQ
 * - INP_MBUF_QUEUE_READY
 * - INP_DONT_SACK_QUEUE
 *
 * These flags help control how LRO will deliver
 * packets to the transport. You first set
 * INP_SUPPORTS_MBUFQ in inp_flags2 to tell the LRO
 * code that you will gladly take a queue of packets
 * instead of a compressed single packet. You also set
 * tfb_do_queued_segments in your t_fb pointer to point
 * to ctf_process_inbound_raw (see the sketch below).
 *
 * This then gets you lists of inbound ACKs/data instead
 * of a condensed compressed ACK/DATA packet. Why would you
 * want that? It gets you access to the arrival times of the
 * actual ACK/DATA as recorded by at least LRO, and possibly
 * by the hardware (if the interface card supports that).
 * In some transport designs this is important, since knowing
 * the actual time we got the packet is useful information.
 *
 * Now there are some interesting caveats that the transport
 * designer needs to take into account when using this feature.
 *
 * 1) It is used with HPTS and pacing: when the pacing timer
 *    for output fires, it will first call the input path to
 *    process any queued packets.
 * 2) Setting INP_MBUF_QUEUE_READY tells LRO: "queue normal
 *    packets; I am busy pacing out data and will process the
 *    queued packets before my tfb_tcp_output call from
 *    pacing". If a non-normal packet arrives (e.g. a SACK),
 *    you will be awoken immediately.
 * 3) Finally, you can add INP_DONT_SACK_QUEUE so that you are
 *    not awoken even if a SACK has arrived. You would do this
 *    when you are running not only a pacing-for-output timer
 *    but a RACK timer as well, i.e. you know you are in
 *    recovery and are in the process (via the timers) of
 *    dealing with the loss.
 *
 * Now a critical thing you must be aware of here is that the
 * use of the flags has a far greater scope than just your
 * typical LRO. Why? Well, that's because in the normal
 * compressed LRO case, at the end of a driver interrupt all
 * packets are going to get presented to the transport, no
 * matter if there is one or 100. With the MBUF_QUEUE model,
 * this is not true. You will only be awoken to process the
 * queue of packets when:
 * a) The flags discussed above allow it.
 * <or>
 * b) You exceed an ack or data limit (by default the
 *    ack limit is effectively infinite (64k ACKs) and
 *    the data limit is 64k of new TCP data).
 * <or>
 * c) The push bit has been set by the peer.
 */
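
/*
 * A minimal sketch of that setup (hedged: the surrounding init code and
 * the exact points where each flag is set are stack-specific; only the
 * flag and field names below come from the description above):
 *
 *	// In the stack's init/fitting path:
 *	tp->t_fb->tfb_do_queued_segments = ctf_process_inbound_raw;
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
 *
 *	// When arming a pacing timer for output:
 *	inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
 *
 *	// When additionally running a RACK (recovery) timer:
 *	inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
 */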

int
ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int has_pkt)
{
	/*
	 * We are passed a raw chain of mbuf packets
	 * that arrived in LRO. They are linked via
	 * the m_nextpkt link in the pkt-headers.
	 *
	 * We process each one by:
	 * a) saving off the next
	 * b) stripping off the ether-header
	 * c) formulating the arguments for tfb_tcp_hpts_do_segment
	 * d) calling tfb_tcp_hpts_do_segment for each mbuf,
	 *    after adjusting the time to match the arrival time.
	 * Note that the LRO code assures no IP options are present.
	 *
	 * The semantics for calling tfb_tcp_hpts_do_segment are the
	 * following:
	 * 1) It returns 0 if all went well and you (the caller) need
	 *    to release the lock.
	 * 2) If nxt_pkt is set, then the function will suppress calls
	 *    to tfb_tcp_output() since you are promising to call again
	 *    with another packet.
	 * 3) If it returns 1, then you must free all the packets being
	 *    shipped in; the tcb has been destroyed (or is about to be
	 *    destroyed).
	 */
	struct mbuf *m_save;
	struct ether_header *eh;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip = NULL;		/* Keep compiler happy. */
#endif
	struct ifnet *ifp;
	struct timeval tv;
	int32_t retval = 0, nxt_pkt, tlen, off;
	uint16_t etype;
	uint16_t drop_hdrlen;
	uint8_t iptos, no_vn = 0, bpf_req = 0;

	NET_EPOCH_ASSERT();

	if (m && m->m_pkthdr.rcvif)
		ifp = m->m_pkthdr.rcvif;
	else
		ifp = NULL;
	if (ifp) {
		bpf_req = bpf_peers_present(ifp->if_bpf);
	} else {
		/*
		 * We should probably KASSERT here rather than
		 * work around it, since LRO always sets rcvif.
		 */
		no_vn = 1;
		goto skip_vnet;
	}
	CURVNET_SET(ifp->if_vnet);
skip_vnet:
	while (m) {
		m_save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Now let's get the ether header */
		eh = mtod(m, struct ether_header *);
		etype = ntohs(eh->ether_type);
		/* Let the BPF see the packet */
		if (bpf_req && ifp)
			ETHER_BPF_MTAP(ifp, m);
		/* Trim off the ethernet header */
		m_adj(m, sizeof(*eh));
		switch (etype) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
				m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
				if (m == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					goto skipped_pkt;
				}
			}
			/* m_pullup may have moved the data; re-derive from m. */
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)(ip6 + 1);
			tlen = ntohs(ip6->ip6_plen);
			drop_hdrlen = sizeof(*ip6);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in6_cksum_pseudo(ip6, tlen,
					    IPPROTO_TCP, m->m_pkthdr.csum_data);
				th->th_sum ^= 0xffff;
			} else
				th->th_sum = in6_cksum(m, IPPROTO_TCP, drop_hdrlen, tlen);
			if (th->th_sum) {
				TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			/*
			 * Be proactive about unspecified IPv6 address in source.
			 * As we use all-zero to indicate unbounded/unconnected pcb,
			 * unspecified IPv6 address can be used to confuse us.
			 *
			 * Note that packets with unspecified IPv6 destination are
			 * already dropped in ip6_input.
			 */
			if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
				/* XXX stat */
				m_freem(m);
				goto skipped_pkt;
			}
			iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			if (m->m_len < sizeof(struct tcpiphdr)) {
				m = m_pullup(m, sizeof(struct tcpiphdr));
				if (m == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					goto skipped_pkt;
				}
			}
			/* m_pullup may have moved the data; re-derive from m. */
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)(ip + 1);
			drop_hdrlen = sizeof(*ip);
			iptos = ip->ip_tos;
			tlen = ntohs(ip->ip_len) - sizeof(struct ip);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in_pseudo(ip->ip_src.s_addr,
					    ip->ip_dst.s_addr,
					    htonl(m->m_pkthdr.csum_data + tlen +
					    IPPROTO_TCP));
				th->th_sum ^= 0xffff;
			} else {
				int len;
				struct ipovly *ipov = (struct ipovly *)ip;

				/*
				 * Checksum extended TCP header and data.
				 */
				len = drop_hdrlen + tlen;
				bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
				ipov->ih_len = htons(tlen);
				th->th_sum = in_cksum(m, len);
				/* Reset length for SDT probes. */
				ip->ip_len = htons(len);
				/* Reset TOS bits */
				ip->ip_tos = iptos;
				/* Re-initialization for later version check */
				ip->ip_v = IPVERSION;
				ip->ip_hl = sizeof(*ip) >> 2;
			}
			if (th->th_sum) {
				TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			break;
		}
#endif
		}
		/*
		 * Convert TCP protocol specific fields to host format.
		 */
		tcp_fields_to_host(th);

		off = th->th_off << 2;
		if (off < sizeof(struct tcphdr) || off > tlen) {
			TCPSTAT_INC(tcps_rcvbadoff);
			m_freem(m);
			goto skipped_pkt;
		}
		tlen -= off;
		drop_hdrlen += off;
		/*
		 * Now let's set up the timeval to be when we should
		 * have been called (if we can).
		 */
		m->m_pkthdr.lro_nsegs = 1;
		if (m->m_flags & M_TSTMP_LRO) {
			tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
			tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
		} else {
			/* Should not happen; should we KASSERT instead? */
			tcp_get_usecs(&tv);
		}
		/* Now what about the next packet? */
		if (m_save || has_pkt)
			nxt_pkt = 1;
		else
			nxt_pkt = 0;
		retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp,
		    drop_hdrlen, tlen, iptos, nxt_pkt, &tv);
		if (retval) {
			/* We lost the lock, and probably the tcb as well */
			m = m_save;
			while (m) {
				m_save = m->m_nextpkt;
				m->m_nextpkt = NULL;
				m_freem(m);
				m = m_save;
			}
			if (no_vn == 0)
				CURVNET_RESTORE();
			return (retval);
		}
skipped_pkt:
		m = m_save;
	}
	if (no_vn == 0)
		CURVNET_RESTORE();
	return (retval);
}

int
ctf_do_queued_segments(struct socket *so, struct tcpcb *tp, int have_pkt)
{
	struct mbuf *m;

	/* First let's see if we have old packets */
	if (tp->t_in_pkt) {
		m = tp->t_in_pkt;
		tp->t_in_pkt = NULL;
		tp->t_tail_pkt = NULL;
		if (ctf_process_inbound_raw(tp, so, m, have_pkt)) {
			/* We lost the tcpcb (maybe a RST came in)? */
			return (1);
		}
	}
	return (0);
}

uint32_t
ctf_outstanding(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una);
}

uint32_t
ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
{
	if (rc_sacked <= ctf_outstanding(tp))
		return (ctf_outstanding(tp) - rc_sacked);
	else {
		/* TSNH */
#ifdef INVARIANTS
		panic("tp:%p rc_sacked:%u > out:%u",
		    tp, rc_sacked, ctf_outstanding(tp));
#endif
		return (0);
	}
}

void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{
	if (tp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(tp->t_inpcb);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}

/*
 * ctf_drop_checks returns 1 when you should not proceed. It places
 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
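/*
 * A typical caller pattern (a sketch; the local variable names here are
 * illustrative, not required):
 *
 *	if (ctf_drop_checks(&to, m, th, tp, &tlen, &thflags,
 *	    &drop_hdrlen, &ret_val))
 *		return (ret_val);
 */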
int
ctf_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th,
    struct tcpcb *tp, int32_t *tlenp, int32_t *thf, int32_t *drop_hdrlen,
    int32_t *ret_val)
{
	int32_t todrop;
	int32_t thflags;
	int32_t tlen;

	thflags = *thf;
	tlen = *tlenp;
	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen ||
		    (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;
			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			TCPSTAT_INC(tcps_rcvduppack);
			TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
		} else {
			TCPSTAT_INC(tcps_rcvpartduppack);
			TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
		}
		/*
		 * DSACK - add SACK block for dropped range
		 */
		if (tp->t_flags & TF_SACK_PERMIT) {
			tcp_update_sack_list(tp, th->th_seq,
			    th->th_seq + todrop);
			/*
			 * ACK now, as the next in-sequence segment
			 * will clear the DSACK block again
			 */
			tp->t_flags |= TF_ACKNOW;
		}
		*drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
	/*
	 * If segment ends after window, drop trailing data (and PUSH and
	 * FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		TCPSTAT_INC(tcps_rcvpackafterwin);
		if (todrop >= tlen) {
			TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments. Continue processing, but
			 * remember to ack. Otherwise, drop segment and
			 * ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				TCPSTAT_INC(tcps_rcvwinprobe);
			} else {
				ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
				return (1);
			}
		} else
			TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH | TH_FIN);
	}
	*thf = thflags;
	*tlenp = tlen;
	return (0);
}

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t thflags, int32_t tlen, int32_t *ret_val)
{
	/*
	 * Generate an ACK dropping incoming segment if it occupies sequence
	 * space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all paths to this
	 * code happen after packets containing RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the segment
	 * we received passes the SYN-RECEIVED ACK test. If it fails send a
	 * RST. This breaks the loop in the "LAND" DoS attack, and also
	 * prevents an ACK storm between two listening ports that have been
	 * sent forged SYN segments, each with the source address of the
	 * other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	    SEQ_GT(th->th_ack, tp->snd_max))) {
		*ret_val = 1;
		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
		return;
	} else
		*ret_val = 0;
	tp->t_flags |= TF_ACKNOW;
	if (m)
		m_freem(m);
}

void
ctf_do_drop(struct mbuf *m, struct tcpcb *tp)
{

	/*
	 * Drop space held by incoming segment and return.
	 */
	if (tp != NULL)
		INP_WUNLOCK(tp->t_inpcb);
	if (m)
		m_freem(m);
}

int
ctf_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp)
{
	/*
	 * RFC 5961 Section 3.2
	 *
	 * - RST drops connection only if SEG.SEQ == RCV.NXT.
	 * - If RST is in window, we send challenge ACK.
	 *
	 * Note: to take into account delayed ACKs, we should test against
	 * last_ack_sent instead of rcv_nxt.
	 * Note 2: we handle the special case of a closed window, not
	 * covered by the RFC.
	 */
	int dropped = 0;

	if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
	    (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {

		KASSERT(tp->t_state != TCPS_SYN_SENT,
		    ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
		    __func__, th, tp));

		if (V_tcp_insecure_rst ||
		    (tp->last_ack_sent == th->th_seq) ||
		    (tp->rcv_nxt == th->th_seq) ||
		    ((tp->last_ack_sent - 1) == th->th_seq)) {
			TCPSTAT_INC(tcps_drops);
			/* Drop the connection. */
			switch (tp->t_state) {
			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				so->so_error = ECONNRESET;
		close:
				tcp_state_change(tp, TCPS_CLOSED);
				/* FALLTHROUGH */
			default:
				tp = tcp_close(tp);
			}
			dropped = 1;
			ctf_do_drop(m, tp);
		} else {
			TCPSTAT_INC(tcps_badrst);
			/* Send challenge ACK. */
			tcp_respond(tp, mtod(m, void *), th, m,
			    tp->rcv_nxt, tp->snd_nxt, TH_ACK);
			tp->last_ack_sent = tp->rcv_nxt;
		}
	} else {
		m_freem(m);
	}
	return (dropped);
}
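
/*
 * Worked example of the RFC 5961 checks above (illustrative numbers):
 * with last_ack_sent = rcv_nxt = 1000 and rcv_wnd = 5000, a RST at
 * seq 1000 (or at seq 999, the "-1" special case) drops the connection,
 * a RST at seq 3000 is in-window but inexact and draws a challenge ACK,
 * and a RST at seq 9000 is outside the window and simply freed.
 */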

/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t *ret_val)
{

	NET_EPOCH_ASSERT();

	TCPSTAT_INC(tcps_badsyn);
	if (V_tcp_insecure_syn &&
	    SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
		tp = tcp_drop(tp, ECONNRESET);
		*ret_val = 1;
		ctf_do_drop(m, tp);
	} else {
		/* Send challenge ACK. */
		tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
		    tp->snd_nxt, TH_ACK);
		tp->last_ack_sent = tp->rcv_nxt;
		m = NULL;
		*ret_val = 0;
		ctf_do_drop(m, NULL);
	}
}

/*
 * ctf_ts_check returns 1 when you should not proceed and the state
 * machine should return. It places in ret_val what should
 * be returned 1/0 by the caller (hpts_do_segment). The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
int
ctf_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
    int32_t tlen, int32_t thflags, int32_t *ret_val)
{

	if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
		/*
		 * Invalidate ts_recent. If this segment updates ts_recent,
		 * the age will be reset later and ts_recent will get a
		 * valid value. If it does not, setting ts_recent to zero
		 * will at least satisfy the requirement that zero be placed
		 * in the timestamp echo reply when ts_recent isn't valid.
		 * The age isn't reset until we get a valid ts_recent
		 * because we don't want out-of-order segments to be dropped
		 * when ts_recent is old.
		 */
		tp->ts_recent = 0;
	} else {
		TCPSTAT_INC(tcps_rcvduppack);
		TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
		TCPSTAT_INC(tcps_pawsdrop);
		*ret_val = 0;
		if (tlen) {
			ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
		} else {
			ctf_do_drop(m, NULL);
		}
		return (1);
	}
	return (0);
}

void
ctf_calc_rwin(struct socket *so, struct tcpcb *tp)
{
	int32_t win;

	/*
	 * Calculate amount of space in receive window, and then do TCP
	 * input processing. Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
}
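
/*
 * Example of the window calculation above (illustrative numbers): if
 * sbspace() reports 8192 bytes free but we have already advertised out
 * to rcv_adv = rcv_nxt + 12000, then rcv_wnd becomes 12000; we never
 * shrink a window we have already advertised to the peer.
 */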

void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{

	if (tp->t_inpcb) {
		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
	}
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	INP_WUNLOCK(tp->t_inpcb);
}

uint32_t
ctf_fixed_maxseg(struct tcpcb *tp)
{
	int optlen;

	if (tp->t_flags & TF_NOOPT)
		return (tp->t_maxseg);

	/*
	 * Here we have simplified code from tcp_addoptions(),
	 * without a proper loop, and with most of the padding
	 * hardcoded. We only consider fixed options that we
	 * would send every time, i.e. SACK is not considered.
	 */
#define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if (tp->t_flags & TF_RCVD_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = 0;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
	} else {
		if (tp->t_flags & TF_REQ_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = PAD(TCPOLEN_MAXSEG);
		if (tp->t_flags & TF_REQ_SCALE)
			optlen += PAD(TCPOLEN_WINDOW);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if (tp->t_flags & TF_SACK_PERMIT)
			optlen += PAD(TCPOLEN_SACK_PERMITTED);
	}
#undef PAD
	optlen = min(optlen, TCP_MAXOLEN);
	return (tp->t_maxseg - optlen);
}
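
/*
 * Worked example for ctf_fixed_maxseg() (one illustrative case, not an
 * exhaustive one): on an established connection with timestamps
 * negotiated (TF_RCVD_TSTMP) and no signature, optlen is
 * TCPOLEN_TSTAMP_APPA (12 bytes, already padded), so the fixed maxseg
 * is t_maxseg - 12. With neither flag set, t_maxseg is returned
 * unchanged.
 */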

void
ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_blocks)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex8 = num_sack_blks;
		if (num_sack_blks > 0) {
			log.u_bbr.flex1 = sack_blocks[0].start;
			log.u_bbr.flex2 = sack_blocks[0].end;
		}
		if (num_sack_blks > 1) {
			log.u_bbr.flex3 = sack_blocks[1].start;
			log.u_bbr.flex4 = sack_blocks[1].end;
		}
		if (num_sack_blks > 2) {
			log.u_bbr.flex5 = sack_blocks[2].start;
			log.u_bbr.flex6 = sack_blocks[2].end;
		}
		if (num_sack_blks > 3) {
			log.u_bbr.applimited = sack_blocks[3].start;
			log.u_bbr.pkts_out = sack_blocks[3].end;
		}
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_SACK_FILTER_RES, 0,
		    0, &log, false, &tv);
	}
}

uint32_t
ctf_decay_count(uint32_t count, uint32_t decay)
{
	/*
	 * Given a count, decay it by a set percentage. The
	 * percentage is expressed in thousandths, i.e. 100% = 1000,
	 * 19.3% = 193.
	 */
	uint64_t perc_count, decay_per;
	uint32_t decayed_count;

	if (decay > 1000) {
		/* We don't raise it */
		return (count);
	}
	perc_count = count;
	decay_per = decay;
	perc_count *= decay_per;
	perc_count /= 1000;
	/*
	 * perc_count now holds the amount
	 * by which to decay the count.
	 */
	decayed_count = count - (uint32_t)perc_count;
	return (decayed_count);
}
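
/*
 * Worked example for ctf_decay_count() (illustrative numbers):
 * ctf_decay_count(1000, 193) computes 1000 * 193 / 1000 = 193 and
 * returns 1000 - 193 = 807, i.e. the count decayed by 19.3%. A decay
 * value above 1000 would grow the count instead, so the count is
 * returned unchanged in that case.
 */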