/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.14 2004/03/08 19:44:32 hsu Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

/* XXX JH */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo[0].ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int	tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static void	tcp_cleartaocache (void);
static void	tcp_notify (struct inpcb *, int);

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	callout inp_tp_rexmt, inp_tp_persist, inp_tp_keep, inp_tp_2msl;
	struct	callout inp_tp_delack;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init()
{
	struct inpcbporthead *porthashbase;
	u_long porthashmask;
	struct inpcbhead *bindhashbase;
	u_long bindhashmask;
	struct vm_zone *ipi_zone;
	int hashsize = TCBHASHSIZE;
	int cpu;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);
	bindhashbase = hashinit(hashsize, M_PCB, &bindhashmask);
	ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
	    ZONE_INTERRUPT, 0);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		LIST_INIT(&tcbinfo[cpu].listhead);
		tcbinfo[cpu].hashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].hashmask);
		tcbinfo[cpu].porthashbase = porthashbase;
		tcbinfo[cpu].porthashmask = porthashmask;
		tcbinfo[cpu].bindhashbase = bindhashbase;
		tcbinfo[cpu].bindhashmask = bindhashmask;
		tcbinfo[cpu].ipi_zone = ipi_zone;
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);

#ifdef INET6
#define	TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define	TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	syncache_init();
	tcp_thread_init();
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(tp, ip_ptr, tcp_ptr)
	struct tcpcb *tp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(tp)
	struct tcpcb *tp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	struct tcphdr *th;
	struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;

#ifdef INET6
	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		if (!(flags & TH_RST)) {
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
#ifdef INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#ifdef INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
		    tp ? tp->t_inpcb : NULL);
		if (ro6 == &sro6 && ro6->ro_rt) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		(void) ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if (ro == &sro && ro->ro_rt) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &it->inp_tp_rexmt);
	callout_init(tp->tt_persist = &it->inp_tp_persist);
	callout_init(tp->tt_keep = &it->inp_tp_keep);
	callout_init(tp->tt_2msl = &it->inp_tp_2msl);
	callout_init(tp->tt_delack = &it->inp_tp_delack);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;
#ifdef INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if ((rt = inp->inp_route.ro_rt) == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
		    == INADDR_ANY)
			goto no_valid_rt;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) :
#endif
			    sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
	}
	inp->inp_ppcb = NULL;
	soisdisconnected(so);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif /* INET6 */
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;
	int cpu;

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *      reassembly queue should be flushed, but in a situation
	 *      where we're really low on mbufs, this is potentially
	 *      useful.
	 */
	for (cpu = 0; cpu < ncpus2; cpu++) {
		LIST_FOREACH(inpb, &tcbinfo[cpu].listhead, inp_list) {
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
					tcp_reass_qsize--;
				}
			}
		}
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo[mycpu->gd_cpuid].ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	gencnt = tcbinfo[mycpu->gd_cpuid].ipi_gencnt;
	n = tcbinfo[mycpu->gd_cpuid].ipi_count;
	splx(s);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	for (inp = LIST_FIRST(&tcbinfo[mycpu->gd_cpuid].listhead), i = 0;
	     inp && i < n; inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->td, inp))
			inp_list[i++] = inp;
	}
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		xig.xig_gen = tcbinfo[mycpu->gd_cpuid].ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo[mycpu->gd_cpuid].ipi_count;
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error, s;

	error = suser(req->td);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser(req->td);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	if (mapped == 1) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
	    sizeof(struct ucred));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif


void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int cpu;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
		    ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			icmp_seq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
			    SEQ_LT(icmp_seq, tp->snd_max))
				(*notify)(inp, inetctlerrmap[cmd]);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		splx(s);
	} else {
		for (cpu = 0; cpu < ncpus2; cpu++)
			in_pcbnotifyall(&tcbinfo[cpu].listhead, faddr,
			    inetctlerrmap[cmd], notify);
	}
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].listhead, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcbinfo[0].listhead, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, notify);
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 */

#define	ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	    < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, errno);
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#ifdef INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#ifdef INET6
		if (isipv6)
			rt = tcp_rtlookup6(&inp->inp_inc);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(&inp->inp_inc);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#ifdef INET6
			(isipv6 ?
			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
			 sizeof(struct tcpiphdr)
#ifdef INET6
			)
#endif /* INET6 */
			;

		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inc)
	struct in_conninfo *inc;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inc->inc_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 *ro6;
	struct rtentry *rt;

	ro6 = &inc->inc6_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
			rt = ro6->ro_rt;
		}
	}
	return rt;
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /*IPSEC*/

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inc)
	struct in_conninfo *inc;
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there
 * is nothing left over in the cache.
 */
static void
tcp_cleartaocache()
{
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND:  TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 * (A worked numeric example follows this function.)
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
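
/*
 * Worked example of the inflight window calculation above.  This is an
 * illustrative sketch only, not part of the algorithm; the sample numbers
 * are assumptions chosen for clarity.  Assume hz = 100, TCP_RTT_SHIFT = 5,
 * a 1460-byte t_maxseg, the default tcp_inflight_stab of 20 (two segments
 * of slop), a smoothed bandwidth of 1,000,000 bytes/sec, and a smoothed
 * RTT of about 20ms, so USERTT ~= 2 ticks << TCP_RTT_SHIFT = 64:
 *
 *	bwnd = bw * USERTT / (hz << TCP_RTT_SHIFT)
 *	       + tcp_inflight_stab * t_maxseg / 10
 *	     = 1000000 * 64 / 3200 + 20 * 1460 / 10
 *	     = 20000 + 2920
 *	     = 22920 bytes
 *
 * That is roughly the bandwidth-delay product (1 Mbyte/sec * 20ms = 20000
 * bytes) plus two maximal segments of slop, which is then clipped to the
 * [tcp_inflight_min, tcp_inflight_max] range and to a floor of two segments
 * before being stored in snd_bwnd.
 */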