/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.63 2008/11/11 10:46:58 sephe Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>
#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mpsafe_proto = 0;
TUNABLE_INT("net.inet.tcp.mpsafe_proto", &tcp_mpsafe_proto);

static int tcp_mpsafe_thread = NETMSG_SERVICE_ADAPTIVE;
TUNABLE_INT("net.inet.tcp.mpsafe_thread", &tcp_mpsafe_thread);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, mpsafe_thread, CTLFLAG_RW,
	   &tcp_mpsafe_thread, 0,
	   "0:BGL, 1:Adaptive BGL, 2:No BGL(experimental)");

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
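/*
 * Worked example (figures for illustration only): with the default
 * tcp_inflight_stab of 50 and a 1448-byte t_maxseg, the slop term
 * computed in tcp_xmit_bandwidth_limit() below works out to
 * 50 * 1448 / 10 = 7240 bytes, i.e. five maximal segments added on
 * top of the measured bandwidth-delay product.
 */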
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(int);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU];
#ifdef SMP
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
#else
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcp_stats, "TCP statistics");
#endif

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif
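/*
 * Usage note (illustrative): the override is a boot-time tunable, e.g.
 * net.inet.tcp.tcbhashsize=1024 in loader.conf(5); tcp_init() below
 * rejects values that are not a power of 2 and falls back to 512.
 */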
/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbporthead *porthashbase;
	u_long porthashmask;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		    25, -1, 0, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		in_pcbinfo_init(&tcbinfo[cpu]);
		tcbinfo[cpu].cpu = cpu;
		tcbinfo[cpu].hashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].hashmask);
		tcbinfo[cpu].porthashbase = porthashbase;
		tcbinfo[cpu].porthashmask = porthashmask;
		tcbinfo[cpu].wildcardhashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].wildcardhashmask);
		tcbinfo[cpu].ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define	TCP_MINPROTOHDR	(sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define	TCP_MINPROTOHDR	(sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus; ++cpu) {
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));
	}
#else
	bzero(&tcpstat, sizeof(struct tcp_stats));
#endif

	syncache_init();
	tcp_thread_init();
}

void
tcpmsg_service_loop(void *dummy)
{
	struct netmsg *msg;
	int mplocked;

	/*
	 * Thread was started with TDF_MPSAFE
	 */
	mplocked = 0;

	while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
		do {
			logtcp(rxmsg);
			mplocked = netmsg_service(msg, tcp_mpsafe_thread,
						  mplocked);
		} while ((msg = lwkt_getport(&curthread->td_msgport)) != NULL);

		logtcp(delayed);
		tcp_willblock(mplocked);
		logtcp(wait);
	}
}

static void
tcp_willblock(int mplocked)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;
	int unlock = 0;

	if (!mplocked && !tcp_mpsafe_proto) {
		if (TAILQ_EMPTY(&tcpcbackq[cpu]))
			return;

		get_mplock();
		mplocked = 1;
		unlock = 1;
	}

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}

	if (unlock)
		rel_mplock();
}
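/*
 * Enqueue sketch (illustrative, not code from this file): producers that
 * defer output to the protocol thread are expected to queue a tcpcb
 * roughly as follows, with tcp_willblock() above draining the per-cpu
 * queue before the thread goes back to sleep:
 *
 *	if (!(tp->t_flags & TF_ONOUTPUTQ)) {
 *		tp->t_flags |= TF_ONOUTPUTQ;
 *		tp->tt_cpu = mycpu->gd_cpuid;
 *		TAILQ_INSERT_TAIL(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
 *	}
 */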
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
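/*
 * Usage sketch (hedged, modeled on how BSD keepalive timers typically
 * pair these calls; not code from this file):
 *
 *	struct tcptemp *tmp = tcp_maketemplate(tp);
 *	if (tmp != NULL) {
 *		tcp_respond(tp, tmp->tt_ipgen, &tmp->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		tcp_freetemplate(tmp);
 *	}
 */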
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &tp->t_inpcb->in6p_route;
			else
				ro = &tp->t_inpcb->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
						ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
			   tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;
	tcp_inittimers(tp);

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);
	return (tp);		/* XXX */
}
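/*
 * Worked check (hedged; relies on the standard TCP_RTTVAR_SHIFT scaling
 * from tcp_var.h): with t_srtt = TCPTV_SRTTBASE = 0 and t_rttvar set as
 * above, the classic RTO formula srtt + 4 * rttvar unscales to exactly
 * TCPTV_RTOBASE, which is why t_rxtcur is seeded with that same value.
 */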
/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

#ifdef SMP

struct netmsg_remwildcard {
	struct netmsg		nm_netmsg;
	struct inpcb		*nm_inp;
	struct inpcbinfo	*nm_pcbinfo;
#if defined(INET6)
	int			nm_isinet6;
#else
	int			nm_unused01;
#endif
};

/*
 * Wildcard inpcb's on SMP boxes must be removed from all cpus before the
 * inp can be detached.  We do this by cycling through the cpus, ending up
 * on the cpu controlling the inp last and then doing the disconnect.
 */
static void
in_pcbremwildcardhash_handler(struct netmsg *msg0)
{
	struct netmsg_remwildcard *msg = (struct netmsg_remwildcard *)msg0;
	int cpu;

	cpu = msg->nm_pcbinfo->cpu;

	if (cpu == msg->nm_inp->inp_pcbinfo->cpu) {
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (msg->nm_isinet6)
			in6_pcbdetach(msg->nm_inp);
		else
#endif
			in_pcbdetach(msg->nm_inp);
		lwkt_replymsg(&msg->nm_netmsg.nm_lmsg, 0);
	} else {
		in_pcbremwildcardhash_oncpu(msg->nm_inp, msg->nm_pcbinfo);
		cpu = (cpu + 1) % ncpus2;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_forwardmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
	}
}

#endif

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef SMP
	int cpu;
#endif
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/*
	 * The tp is not instantly destroyed in the wildcard case.  Setting
	 * the state to TCPS_TERMINATING will prevent the TCP stack from
	 * messing with it, though it should be noted that this change may
	 * not take effect on other cpus until we have chained the wildcard
	 * hash removal.
	 *
	 * XXX we currently depend on the BGL to synchronize the tp->t_state
	 * update and prevent other tcp protocol threads from accepting new
	 * connections on the listen socket we might be trying to close down.
	 */
	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat/2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
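	/*
	 * Worked example (assumes hz = 100 and the usual RTM_RTTUNIT and
	 * TCP_RTT_SCALE definitions from route.h/tcp_var.h): a 50 ms
	 * smoothed RTT lives in t_srtt as 5 ticks * 32 = 160 units, and
	 * the conversion above caches it in rmx_rtt as
	 * 160 * (1000000 / (100 * 32)) = 49920 usec, i.e. ~50 ms.
	 */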
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_cleanup(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);

	tcp_destroy_timermsg(tp);
	if (tp->t_flags & TF_SYNCACHE)
		syncache_destroy(tp);

	/*
	 * Discard the inp.  In the SMP case a wildcard inp's hash (created
	 * by a listen socket or an INADDR_ANY udp socket) is replicated
	 * for each protocol thread and must be removed in the context of
	 * that thread.  This is accomplished by chaining the message
	 * through the cpus.
	 *
	 * If the inp is not wildcarded we simply detach, which will remove
	 * any hashes still present for this inp.
	 */
#ifdef SMP
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_remwildcard *msg;

		cpu = (inp->inp_pcbinfo->cpu + 1) % ncpus2;
		msg = kmalloc(sizeof(struct netmsg_remwildcard),
			      M_LWKTMSG, M_INTWAIT);
		netmsg_init(&msg->nm_netmsg, NULL, &netisr_afree_rport,
			    0, in_pcbremwildcardhash_handler);
#ifdef INET6
		msg->nm_isinet6 = isafinet6;
#endif
		msg->nm_inp = inp;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_sendmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
	} else
#endif
	{
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (isafinet6)
			in6_pcbdetach(inp);
		else
#endif
			in_pcbdetach(inp);
	}
	tcpstat.tcps_closed++;
	return (NULL);
}

static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	LIST_FOREACH(inpb, head, inp_list) {
		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;
		if ((tcpb = intotcpcb(inpb))) {
			while ((te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
				LIST_REMOVE(te, tqe_q);
				m_freem(te->tqe_m);
				FREE(te, M_TSEGQ);
				tcp_reass_qsize--;
			}
		}
	}
}

#ifdef SMP
struct netmsg_tcp_drain {
	struct netmsg		nm_netmsg;
	struct inpcbhead	*nm_head;
};

static void
tcp_drain_handler(netmsg_t netmsg)
{
	struct netmsg_tcp_drain *nm = (void *)netmsg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(&nm->nm_netmsg.nm_lmsg, 0);
}
#endif

void
tcp_drain(void)
{
#ifdef SMP
	int cpu;
#endif

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *msg;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			msg = kmalloc(sizeof(struct netmsg_tcp_drain),
				      M_LWKTMSG, M_NOWAIT);
			if (msg == NULL)
				continue;
			netmsg_init(&msg->nm_netmsg, NULL, &netisr_afree_rport,
				    0, tcp_drain_handler);
			msg->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
		}
	}
#else
	tcp_drain_oncpu(&tcbinfo[0].pcblisthead);
#endif
}
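/*
 * Context note (hedged): tcp_drain() is normally reached through the
 * protocol switch pr_drain hook when the system runs low on mbufs, so
 * the netmsg fan-out above lets each protocol thread flush the pcb
 * lists it owns instead of touching remote per-cpu state directly.
 */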
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	inp_gen_t gencnt;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & (1 << cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		gencnt = tcbinfo[cpu_id].ipi_gencnt;
		n = tcbinfo[cpu_id].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (inp->inp_gencnt > gencnt)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
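/*
 * Consumer note (hedged): userland tools along the lines of netstat(1)
 * read this via sysctlbyname("net.inet.tcp.pcblist", ...), first passing
 * a NULL buffer so the oldidx estimate above can size the real request.
 */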
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}
	crit_enter();
	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg	nm_nmsg;
	void		(*nm_notify)(struct inpcb *, int);
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(struct netmsg *netmsg)
{
	struct netmsg_tcp_notify *nmsg = (struct netmsg_tcp_notify *)netmsg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nmsg->nm_faddr,
			nmsg->nm_arg, nmsg->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(tcp_cport(nextcpu), &netmsg->nm_lmsg);
	else
		lwkt_replymsg(&netmsg->nm_lmsg, 0);
}

void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmpseq;
	int arg, cpu;

	if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
		   (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		    cmd == PRC_UNREACH_PORT ||
		    cmd == PRC_TIMXCEED_INTRANS) &&
		   ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		struct icmp *icmp = (struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	}

	if (ip != NULL) {
		crit_enter();
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
				  ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
					ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			icmpseq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				(*notify)(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		crit_exit();
	} else {
		struct netmsg_tcp_notify nmsg;

		KKASSERT(&curthread->td_msgport == cpu_portfn(0));
		netmsg_init(&nmsg.nm_nmsg, NULL, &curthread->td_msgport,
			    0, tcp_notifyall_oncpu);
		nmsg.nm_faddr = faddr;
		nmsg.nm_arg = arg;
		nmsg.nm_notify = notify;

		lwkt_domsg(tcp_cport(0), &nmsg.nm_nmsg.nm_lmsg, 0);
	}
}

#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		   ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			return;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
			      (struct sockaddr *)ip6cp->ip6c_src,
			      th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
			      (const struct sockaddr *)sa6_src, 0, cmd,
			      arg, notify);
}
#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define	ISN_BYTES_PER_SECOND 1048576
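/*
 * Worked check (arithmetic only): at ISN_BYTES_PER_SECOND = 1048576 =
 * 2^20, the 32-bit ISN space wraps after 2^32 / 2^20 = 4096 seconds,
 * about 68 minutes, which is the "over an hour before rollover"
 * mentioned above.
 */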
u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL) {
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_wacked = 0;
	}
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp == NULL)
		return;

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}
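	/*
	 * Illustration (RFC 1191, section 7.1 plateau table): an oldmtu
	 * of 1500 steps down to the next lower plateau of 1492, then
	 * 1006, 508, and so on, so repeated losses converge quickly.
	 */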
	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else
		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
			   (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 50 = 5 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
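	/*
	 * Worked example (illustrative numbers only): with a smoothed
	 * bandwidth of 1,000,000 bytes/sec, a USERTT equivalent to 40 ms,
	 * and t_maxseg = 1448, the formula below yields
	 * 1000000 * 0.040 + 50 * 1448 / 10 = 40000 + 7240 = 47240 bytes,
	 * before the min/max and two-segment clamps are applied.
	 */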
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}