/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.63 2008/11/11 10:46:58 sephe Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>
#include <sys/msgport2.h>
#include <machine/smp.h>

#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mpsafe_proto = 0;
TUNABLE_INT("net.inet.tcp.mpsafe_proto", &tcp_mpsafe_proto);

static int tcp_mpsafe_thread = NETMSG_SERVICE_ADAPTIVE;
TUNABLE_INT("net.inet.tcp.mpsafe_thread", &tcp_mpsafe_thread);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, mpsafe_thread, CTLFLAG_RW,
	   &tcp_mpsafe_thread, 0,
	   "0:BGL, 1:Adaptive BGL, 2:No BGL(experimental)");

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss , 0, "Minimum TCP Maximum Segment Size");

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

/* XXX JH */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo[0].ipi_count, 0, "Number of active PCBs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(int);
static void tcp_cleartaocache(void);
static void tcp_notify(struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU];
#ifdef SMP
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
#else
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcp_stats, "TCP statistics");
#endif
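/*
 * Reader-side note (a sketch, not part of the original file): on SMP the
 * handler above exports one struct tcp_stats per cpu, back to back, so a
 * userland consumer of net.inet.tcp.stats has to sum the per-cpu copies
 * itself, roughly:
 *
 *	struct tcp_stats st[MAXCPU], sum;
 *	size_t len = sizeof(st);
 *	int i, n;
 *
 *	if (sysctlbyname("net.inet.tcp.stats", st, &len, NULL, 0) == 0) {
 *		n = len / sizeof(st[0]);
 *		sum = st[0];
 *		for (i = 1; i < n; ++i)
 *			sum.tcps_drops += st[i].tcps_drops;
 *		...and likewise for each counter of interest.
 *	}
 */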
/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbporthead *porthashbase;
	u_long porthashmask;
	struct vm_zone *ipi_zone;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		   25, -1, 0, NULL);

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);
	ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
			 ZONE_INTERRUPT, 0);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		in_pcbinfo_init(&tcbinfo[cpu]);
		tcbinfo[cpu].cpu = cpu;
		tcbinfo[cpu].hashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].hashmask);
		tcbinfo[cpu].porthashbase = porthashbase;
		tcbinfo[cpu].porthashmask = porthashmask;
		tcbinfo[cpu].wildcardhashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].wildcardhashmask);
		tcbinfo[cpu].ipi_zone = ipi_zone;
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define	TCP_MINPROTOHDR	(sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define	TCP_MINPROTOHDR	(sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus; ++cpu) {
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));
	}
#else
	bzero(&tcpstat, sizeof(struct tcp_stats));
#endif

	syncache_init();
	tcp_thread_init();
}

void
tcpmsg_service_loop(void *dummy)
{
	struct netmsg *msg;
	int mplocked;

	/*
	 * Thread was started with TDF_MPSAFE
	 */
	mplocked = 0;

	while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
		do {
			logtcp(rxmsg);
			mplocked = netmsg_service(msg, tcp_mpsafe_thread,
						  mplocked);
		} while ((msg = lwkt_getport(&curthread->td_msgport)) != NULL);

		logtcp(delayed);
		tcp_willblock(mplocked);
		logtcp(wait);
	}
}

static void
tcp_willblock(int mplocked)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;
	int unlock = 0;

	if (!mplocked && !tcp_mpsafe_proto) {
		if (TAILQ_EMPTY(&tcpcbackq[cpu]))
			return;

		get_mplock();
		mplocked = 1;
		unlock = 1;
	}

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}

	if (unlock)
		rel_mplock();
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
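/*
 * A note on the checksum seeding below (explanatory, assuming standard
 * in_pseudo() semantics): for IPv4 the th_sum field is preloaded with the
 * pseudo-header sum over the source address, destination address, and a
 * combined length + protocol value.  For a header-only segment that value
 * is htons(sizeof(struct tcphdr) + IPPROTO_TCP) = htons(20 + 6); callers
 * that transmit payload must account for the real TCP length instead.
 */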
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
				ip->ip_dst.s_addr,
				htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &tp->t_inpcb->in6p_route;
			else
				ro = &tp->t_inpcb->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
						ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
			   tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
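/*
 * Usage sketch (illustrative; the actual call sites live in the keepalive
 * timer code, not in this file): a keepalive probe pairs tcp_maketemplate()
 * with tcp_respond() roughly as follows.
 *
 *	struct tcptemp *tmp = tcp_maketemplate(tp);
 *	if (tmp != NULL) {
 *		tcp_respond(tp, tmp->tt_ipgen, &tmp->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		tcp_freetemplate(tmp);
 *	}
 *
 * Passing m == NULL makes tcp_respond() build a fresh mbuf from the
 * template, and the snd_una - 1 sequence number forces the peer to ACK.
 */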
/*
 * Create a new TCP control block, making an empty reassembly queue and
 * hooking it to the argument protocol control block.  The `inp' parameter
 * must have come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;
	tcp_inittimers(tp);

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

#ifdef SMP

struct netmsg_remwildcard {
	struct netmsg		nm_netmsg;
	struct inpcb		*nm_inp;
	struct inpcbinfo	*nm_pcbinfo;
#if defined(INET6)
	int			nm_isinet6;
#else
	int			nm_unused01;
#endif
};

/*
 * Wildcard inpcb's on SMP boxes must be removed from all cpus before the
 * inp can be detached.  We do this by cycling through the cpus, ending up
 * on the cpu controlling the inp last and then doing the disconnect.
 */
static void
in_pcbremwildcardhash_handler(struct netmsg *msg0)
{
	struct netmsg_remwildcard *msg = (struct netmsg_remwildcard *)msg0;
	int cpu;

	cpu = msg->nm_pcbinfo->cpu;

	if (cpu == msg->nm_inp->inp_pcbinfo->cpu) {
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (msg->nm_isinet6)
			in6_pcbdetach(msg->nm_inp);
		else
#endif
			in_pcbdetach(msg->nm_inp);
		lwkt_replymsg(&msg->nm_netmsg.nm_lmsg, 0);
	} else {
		in_pcbremwildcardhash_oncpu(msg->nm_inp, msg->nm_pcbinfo);
		cpu = (cpu + 1) % ncpus2;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_forwardmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
	}
}

#endif

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef SMP
	int cpu;
#endif
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/*
	 * The tp is not instantly destroyed in the wildcard case.  Setting
	 * the state to TCPS_TERMINATING will prevent the TCP stack from
	 * messing with it, though it should be noted that this change may
	 * not take effect on other cpus until we have chained the wildcard
	 * hash removal.
	 *
	 * XXX we currently depend on the BGL to synchronize the tp->t_state
	 * update and prevent other tcp protocol threads from accepting new
	 * connections on the listen socket we might be trying to close down.
	 */
	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For a listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If the timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used either.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
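	/*
	 * Scale conversion example (illustrative, assuming hz = 100):
	 * t_srtt is kept in units of ticks << TCP_RTT_SHIFT (1/32 tick,
	 * TCP_RTT_SCALE = 32), while rmx_rtt is kept in RTM_RTTUNIT
	 * (1000000) units per second of rtt.  A smoothed rtt of one second
	 * is t_srtt = 100 * 32 = 3200, so the conversion below gives
	 *
	 *	i = t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))
	 *	  = 3200 * (1000000 / 3200) = 3200 * 312 = 998400
	 *
	 * i.e. about one second in route-metric units (the small error
	 * comes from the integer division).
	 */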
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_cleanup(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);

	tcp_destroy_timermsg(tp);

	/*
	 * Discard the inp.  In the SMP case a wildcard inp's hash (created
	 * by a listen socket or an INADDR_ANY udp socket) is replicated
	 * for each protocol thread and must be removed in the context of
	 * that thread.  This is accomplished by chaining the message
	 * through the cpus.
	 *
	 * If the inp is not wildcarded we simply detach, which will remove
	 * any hashes still present for this inp.
	 */
#ifdef SMP
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_remwildcard *msg;

		cpu = (inp->inp_pcbinfo->cpu + 1) % ncpus2;
		msg = kmalloc(sizeof(struct netmsg_remwildcard),
			      M_LWKTMSG, M_INTWAIT);
		netmsg_init(&msg->nm_netmsg, &netisr_afree_rport, 0,
			    in_pcbremwildcardhash_handler);
#ifdef INET6
		msg->nm_isinet6 = isafinet6;
#endif
		msg->nm_inp = inp;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_sendmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
	} else
#endif
	{
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (isafinet6)
			in6_pcbdetach(inp);
		else
#endif
			in_pcbdetach(inp);
	}
	tcpstat.tcps_closed++;
	return (NULL);
}

static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	LIST_FOREACH(inpb, head, inp_list) {
		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;
		if ((tcpb = intotcpcb(inpb))) {
			while ((te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
				LIST_REMOVE(te, tqe_q);
				m_freem(te->tqe_m);
				FREE(te, M_TSEGQ);
				tcp_reass_qsize--;
			}
		}
	}
}

#ifdef SMP
struct netmsg_tcp_drain {
	struct netmsg		nm_netmsg;
	struct inpcbhead	*nm_head;
};

static void
tcp_drain_handler(netmsg_t netmsg)
{
	struct netmsg_tcp_drain *nm = (void *)netmsg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(&nm->nm_netmsg.nm_lmsg, 0);
}
#endif

void
tcp_drain(void)
{
#ifdef SMP
	int cpu;
#endif

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *      reassembly queue should be flushed, but in a situation
	 *      where we're really low on mbufs, this is potentially
	 *      useful.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *msg;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			msg = kmalloc(sizeof(struct netmsg_tcp_drain),
				      M_LWKTMSG, M_NOWAIT);
			if (msg == NULL)
				continue;
			netmsg_init(&msg->nm_netmsg, &netisr_afree_rport, 0,
				    tcp_drain_handler);
			msg->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(tcp_cport(cpu), &msg->nm_netmsg.nm_lmsg);
		}
	}
#else
	tcp_drain_oncpu(&tcbinfo[0].pcblisthead);
#endif
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	inp_gen_t gencnt;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & (1 << cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		gencnt = tcbinfo[cpu_id].ipi_gencnt;
		n = tcbinfo[cpu_id].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (inp->inp_gencnt > gencnt)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}
	crit_enter();
	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg	nm_nmsg;
	void		(*nm_notify)(struct inpcb *, int);
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(struct netmsg *netmsg)
{
	struct netmsg_tcp_notify *nmsg = (struct netmsg_tcp_notify *)netmsg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nmsg->nm_faddr,
			nmsg->nm_arg, nmsg->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(tcp_cport(nextcpu), &netmsg->nm_lmsg);
	else
		lwkt_replymsg(&netmsg->nm_lmsg, 0);
}

void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmpseq;
	int arg, cpu;
	if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
		   (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		    cmd == PRC_UNREACH_PORT ||
		    cmd == PRC_TIMXCEED_INTRANS) &&
		   ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		struct icmp *icmp = (struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	}

	if (ip != NULL) {
		crit_enter();
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
				  ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
					ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			icmpseq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				(*notify)(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		crit_exit();
	} else {
		struct netmsg_tcp_notify nmsg;

		KKASSERT(&curthread->td_msgport == cpu_portfn(0));
		netmsg_init(&nmsg.nm_nmsg, &curthread->td_msgport, 0,
			    tcp_notifyall_oncpu);
		nmsg.nm_faddr = faddr;
		nmsg.nm_arg = arg;
		nmsg.nm_notify = notify;

		lwkt_domsg(tcp_cport(0), &nmsg.nm_nmsg.nm_lmsg, 0);
	}
}

#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		   ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */
		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			return;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
}
#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define	ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}
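/*
 * Worked numbers for the time component above (illustrative, assuming
 * hz = 100): each tick adds ISN_BYTES_PER_SECOND / hz = 1048576 / 100
 * = 10485 to the ISN, i.e. the ISN space advances by one megabyte per
 * second, and the 32-bit sequence space wraps after about
 * 2^32 / 1048576 = 4096 seconds -- the "over an hour before rollover"
 * mentioned in the comment above.
 */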
/*
 * When a source quench is received, close the congestion window to one
 * segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL) {
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_wacked = 0;
	}
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	/* use the inpcb directly so a NULL tp is not dereferenced */
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp == NULL)
		return;

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		struct rmxp_tao *taop = rmx_taop(rt->rt_rmx);

		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (taop->tao_mssopt != 0 && taop->tao_mssopt < maxopd)
			maxopd = taop->tao_mssopt;
	} else
		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));
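	/*
	 * Worked example (illustrative): for an IPv4 path with mtu = 1500
	 * the computation above yields maxopd = 1500 -
	 * sizeof(struct tcpiphdr) = 1460; with RFC 1323 timestamps in use
	 * the code below subtracts TCPOLEN_TSTAMP_APPA (12), giving
	 * mss = 1448, which is under MCLBYTES and therefore not rounded.
	 * For a jumbo mtu of 9000, maxopd = 8960 and mss = 8948 would be
	 * rounded down to a multiple of MCLBYTES (8192 when MCLBYTES is
	 * 2048).
	 */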
	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
		       (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	if ((tp->t_flags & (TF_REQ_CC | TF_RCVD_CC | TF_NOOPT)) ==
		       (TF_REQ_CC | TF_RCVD_CC))
		mss -= TCPOLEN_CC_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(struct in_conninfo *inc)
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif
		rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST))
		return (NULL);

	return (rmx_taop(rt->rt_rmx));
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the routing
 * tables are initialized at the same time as TCP, so there is nothing in
 * the cache left over.
 */
static void
tcp_cleartaocache(void)
{
}
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this: First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

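	/*
	 * Worked example of the filter above (illustrative numbers, not
	 * from the original source): with hz = 100, 14600 bytes (ten
	 * 1460-byte segments) acked over 5 ticks, the raw sample is
	 * 14600 * 100 / 5 = 292000 bytes/sec.  If the long term average
	 * was 200000, the EWMA yields (200000 * 15 + 292000) >> 4 = 205750,
	 * i.e. each sample moves the average by only 1/16 of the
	 * difference, smoothing the coarse tick granularity noted above.
	 */
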
	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may
	 *	    need a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
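
/*
 * Illustrative sketch (not original code): the window calculation above,
 * pulled out with worked numbers.  Assuming hz = 100 and TCP_RTT_SHIFT = 5
 * (so srtt values are in ticks scaled by 32), a connection seeing
 * bw = 250000 bytes/sec with t_srtt = t_rttbest = 640 (200ms) and a
 * 1460-byte t_maxseg gets 250000 * 640 / 3200 + 20 * 1460 / 10 =
 * 50000 + 2920 = 52920 bytes: the 50KB bandwidth-delay product plus the
 * two-segment slop described above.  The function name is hypothetical;
 * wrapped in #if 0 so it is never compiled.
 */
#if 0
static u_long
example_bwnd(u_long bw, int srtt, int rttbest, u_int maxseg)
{
	int usertt = (srtt + rttbest) / 2;

	return ((int64_t)bw * usertt / (hz << TCP_RTT_SHIFT) +
		tcp_inflight_stab * (int)maxseg / 10);
}
#endif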