/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.38 2004/08/11 02:36:22 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>

#include <sys/msgport2.h>

#include <machine/smp.h>

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");
int tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

/* XXX JH */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo[0].ipi_count, 0, "Number of active PCBs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_cleartaocache(void);
static void tcp_notify(struct inpcb *, int);

struct tcp_stats tcpstats_ary[MAXCPU];
#ifdef SMP
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, (void *)&tcpstats_ary[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, (void *)&tcpstats_ary[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
#else
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcp_stats, "TCP statistics");
#endif

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif
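
/*
 * Illustrative tuning (not part of the original source): tcbhashsize is
 * a boot-time tunable fetched in tcp_init() below and must be a power
 * of 2, while the inflight limiter is toggled at run time, e.g.:
 *
 *	loader environment:	net.inet.tcp.tcbhashsize=2048
 *	run time:		sysctl net.inet.tcp.inflight_enable=1
 */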

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	callout inp_tp_rexmt, inp_tp_persist, inp_tp_keep, inp_tp_2msl;
	struct	callout inp_tp_delack;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * TCP initialization.
 */
void
tcp_init()
{
	struct inpcbporthead *porthashbase;
	u_long porthashmask;
	struct vm_zone *ipi_zone;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		    25, -1, 0, NULL);

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);
	ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
			 ZONE_INTERRUPT, 0);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		in_pcbinfo_init(&tcbinfo[cpu]);
		tcbinfo[cpu].cpu = cpu;
		tcbinfo[cpu].hashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].hashmask);
		tcbinfo[cpu].porthashbase = porthashbase;
		tcbinfo[cpu].porthashmask = porthashmask;
		tcbinfo[cpu].wildcardhashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].wildcardhashmask);
		tcbinfo[cpu].ipi_zone = ipi_zone;
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define	TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define	TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics.
	 *
	 * It is laid out as an array with one element for UP and
	 * SMP_MAXCPU elements for SMP.  This allows us to retain
	 * the access mechanism from userland for both UP and SMP.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus; ++cpu) {
		bzero(&tcpstats_ary[cpu], sizeof(struct tcp_stats));
	}
#else
	bzero(&tcpstat, sizeof(struct tcp_stats));
#endif

	syncache_init();
	tcp_thread_init();
}

void
tcpmsg_service_loop(void *dummy)
{
	struct netmsg *msg;

	while ((msg = lwkt_waitport(&curthread->td_msgport, NULL))) {
		do {
			msg->nm_lmsg.ms_cmd.cm_func(&msg->nm_lmsg);
		} while ((msg = lwkt_getport(&curthread->td_msgport)) != NULL);
		tcp_willblock();
	}
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}


/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
				ip->ip_dst.s_addr,
				htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, (void *)&tmp->tt_ipgen, (void *)&tmp->tt_t);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
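
/*
 * Illustrative usage (a sketch, not part of this file; cf. the keepalive
 * timer in tcp_timer.c): the two helpers above combine with tcp_respond()
 * below to emit a keepalive probe for a connected tcpcb `tp':
 *
 *	struct tcptemp *t = tcp_maketemplate(tp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		tcp_freetemplate(t);
 *	}
 *
 * Sending snd_una - 1 forces the peer to ACK, confirming it is alive.
 */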

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
			ro = &tp->t_inpcb->inp_route;
	} else {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
					       ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
				 tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		(void)ip_output(m, NULL, ro, ipflags, NULL,
				tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &it->inp_tp_rexmt);
	callout_init(tp->tt_persist = &it->inp_tp_persist);
	callout_init(tp->tt_keep = &it->inp_tp_keep);
	callout_init(tp->tt_2msl = &it->inp_tp_2msl);
	callout_init(tp->tt_delack = &it->inp_tp_delack);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
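
/*
 * Numeric illustration (assumes the conventional TCPTV_RTOBASE of 3*hz
 * from tcp_timer.h): with t_srtt starting at TCPTV_SRTTBASE (0), the
 * t_rttvar initialization above is scaled so that srtt + 4 * rttvar
 * equals TCPTV_RTOBASE, i.e. the very first retransmit timeout fires
 * after roughly three seconds, until genuine RTT samples arrive and
 * refine the estimate.
 */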

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

#ifdef SMP

struct netmsg_remwildcard {
	struct lwkt_msg		nm_lmsg;
	struct inpcb		*nm_inp;
	struct inpcbinfo	*nm_pcbinfo;
#if defined(INET6)
	int			nm_isinet6;
#else
	int			nm_unused01;
#endif
};

/*
 * Wildcard inpcb's on SMP boxes must be removed from all cpus before the
 * inp can be detached.  We do this by cycling through the cpus, ending up
 * on the cpu controlling the inp last and then doing the disconnect.
 */
static int
in_pcbremwildcardhash_handler(struct lwkt_msg *msg0)
{
	struct netmsg_remwildcard *msg = (struct netmsg_remwildcard *)msg0;
	int cpu;

	cpu = msg->nm_pcbinfo->cpu;

	if (cpu == msg->nm_inp->inp_pcbinfo->cpu) {
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (msg->nm_isinet6)
			in6_pcbdetach(msg->nm_inp);
		else
#endif
			in_pcbdetach(msg->nm_inp);
		lwkt_replymsg(&msg->nm_lmsg, 0);
	} else {
		in_pcbremwildcardhash_oncpu(msg->nm_inp, msg->nm_pcbinfo);
		cpu = (cpu + 1) % ncpus2;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_forwardmsg(tcp_cport(cpu), &msg->nm_lmsg);
	}
	return (EASYNC);
}

#endif

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef SMP
	int cpu;
#endif
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/*
	 * The tp is not instantly destroyed in the wildcard case.  Setting
	 * the state to TCPS_TERMINATING will prevent the TCP stack from
	 * messing with it, though it should be noted that this change may
	 * not take effect on other cpus until we have chained the wildcard
	 * hash removal.
	 *
	 * XXX we currently depend on the BGL to synchronize the tp->t_state
	 * update and prevent other tcp protocol threads from accepting new
	 * connections on the listen socket we might be trying to close down.
	 */
	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
	}

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/*
	 * Discard the inp.  In the SMP case a wildcard inp's hash (created
	 * by a listen socket or an INADDR_ANY udp socket) is replicated
	 * for each protocol thread and must be removed in the context of
	 * that thread.  This is accomplished by chaining the message
	 * through the cpus.
	 *
	 * If the inp is not wildcarded we simply detach, which will remove
	 * any hashes still present for this inp.
	 */
#ifdef SMP
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_remwildcard *msg;

		cpu = (inp->inp_pcbinfo->cpu + 1) % ncpus2;
		msg = malloc(sizeof(struct netmsg_remwildcard),
			    M_LWKTMSG, M_INTWAIT);
		lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
		    lwkt_cmd_func(in_pcbremwildcardhash_handler),
		    lwkt_cmd_op_none);
#ifdef INET6
		msg->nm_isinet6 = isafinet6;
#endif
		msg->nm_inp = inp;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_sendmsg(tcp_cport(cpu), &msg->nm_lmsg);
	} else
#endif
	{
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (isafinet6)
			in6_pcbdetach(inp);
		else
#endif
			in_pcbdetach(inp);
	}
	tcpstat.tcps_closed++;
	return (NULL);
}

static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	LIST_FOREACH(inpb, head, inp_list) {
		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;
		if ((tcpb = intotcpcb(inpb))) {
			while ((te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
				LIST_REMOVE(te, tqe_q);
				m_freem(te->tqe_m);
				FREE(te, M_TSEGQ);
				tcp_reass_qsize--;
			}
		}
	}
}

#ifdef SMP
struct netmsg_tcp_drain {
	struct lwkt_msg		nm_lmsg;
	struct inpcbhead	*nm_head;
};

static int
tcp_drain_handler(lwkt_msg_t lmsg)
{
	struct netmsg_tcp_drain *nm = (void *)lmsg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(lmsg, 0);
	return(EASYNC);
}
#endif

void
tcp_drain()
{
#ifdef SMP
	int cpu;
#endif

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *msg;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			msg = malloc(sizeof(struct netmsg_tcp_drain),
				    M_LWKTMSG, M_NOWAIT);
			if (msg == NULL)
				continue;
			lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
			    lwkt_cmd_func(tcp_drain_handler),
			    lwkt_cmd_op_none);
			msg->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(tcp_cport(cpu), &msg->nm_lmsg);
		}
	}
#else
	tcp_drain_oncpu(&tcbinfo[0].pcblisthead);
#endif
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	inp_gen_t gencnt;
	struct xinpgen xig;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = 2 * ncpus * (sizeof xig) +
			      (n + n/8) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = malloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & (1 << cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		/* indicate change of CPU */
		cpu_mb1();

		gencnt = tcbinfo[cpu_id].ipi_gencnt;
		n = tcbinfo[cpu_id].ipi_count;

		xig.xig_len = sizeof xig;
		xig.xig_count = n;
		xig.xig_gen = gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_cpu = cpu_id;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
		if (error != 0)
			break;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (inp->inp_gencnt > gencnt)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof(xt);
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof (xt));
				if (error)
					break;
				++i;
			}
		}
		if (error == 0) {
			/*
			 * Give the user an updated idea of our state.
			 * If the generation differs from what we told
			 * her before, she knows that something happened
			 * while we were processing this request, and it
			 * might be necessary to retry.
			 */
			xig.xig_gen = tcbinfo[cpu_id].ipi_gencnt;
			xig.xig_sogen = so_gencnt;
			xig.xig_count = tcbinfo[cpu_id].ipi_count;
			error = SYSCTL_OUT(req, &xig, sizeof xig);
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	free(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error, s;

	error = suser(req->td);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	s = splnet();

	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
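
/*
 * Illustrative userland consumer (hypothetical code, not part of this
 * file): an identd-style daemon would place the local endpoint in
 * addrs[0] and the foreign endpoint in addrs[1], then read back the
 * owning ucred:
 *
 *	struct sockaddr_in addrs[2];	(addrs[0] local, addrs[1] foreign)
 *	struct ucred cred;
 *	size_t len = sizeof(cred);
 *	sysctlbyname("net.inet.tcp.getcred", &cred, &len,
 *	    addrs, sizeof(addrs));
 */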

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s;
	boolean_t mapped = FALSE;

	error = suser(req->td);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}
	s = splnet();
	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
			   sizeof(struct ucred));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
	    0, 0,
	    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int cpu;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst &&
		 (cmd == PRC_UNREACH_ADMIN_PROHIB || cmd == PRC_UNREACH_PORT ||
		  cmd == PRC_TIMXCEED_INTRANS) &&
		 ip != NULL)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
				  ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
					ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			/* convert the wire sequence to host order */
			icmp_seq = ntohl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
			    SEQ_LT(icmp_seq, tp->snd_max))
				(*notify)(inp, inetctlerrmap[cmd]);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		splx(s);
	} else {
		for (cpu = 0; cpu < ncpus2; cpu++) {
			in_pcbnotifyall(&tcbinfo[cpu].pcblisthead, faddr,
					inetctlerrmap[cmd], notify);
		}
	}
}

#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			return;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, notify);
}
#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define	ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}
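
	/*
	 * The RFC 1948 form of the computation below is, informally:
	 *
	 *	ISN = MD5(lport, fport, laddr, faddr, secret) + M(t)
	 *
	 * where M(t) = ticks * (ISN_BYTES_PER_SECOND / hz), so the ISN
	 * space for a given connection tuple advances monotonically at
	 * one megabyte per second while remaining unguessable to anyone
	 * who does not know isn_secret.
	 */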
	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, errno);
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#ifdef INET6
	/* use inp, not tp->t_inpcb, so a NULL tp is not dereferenced */
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (isipv6)
			rt = tcp_rtlookup6(&inp->inp_inc);
		else
			rt = tcp_rtlookup(&inp->inp_inc);
		if (rt == NULL || rt->rt_rmx.rmx_mtu == 0) {
			tp->t_maxopd = tp->t_maxseg =
			    isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
		      (isipv6 ?
		       sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		       sizeof(struct tcpiphdr));

		if (offered != 0)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC | TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES - 1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then NULL is returned.  This
 * routine is called by TCP routines that access the rmx structure and
 * by tcp_mss to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inc->inc_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return (rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6;
	struct rtentry *rt;

	ro6 = &inc->inc6_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
			rt = ro6->ro_rt;
		}
	}
	return (rt);
}
#endif
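
/*
 * Illustrative caller pattern (a sketch, not part of the original
 * source): both lookups cache the rtentry in the in_conninfo's route,
 * so a typical consumer just checks for NULL and reads the metrics,
 * much as tcp_mtudisc() does above:
 *
 *	struct rtentry *rt = tcp_rtlookup(&inp->inp_inc);
 *	if (rt != NULL && rt->rt_rmx.rmx_mtu != 0)
 *		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
 */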

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(struct in_conninfo *inc)
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif
		rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST))
		return (NULL);

	return (rmx_taop(rt->rt_rmx));
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there
 * is nothing in the cache left over.
 */
static void
tcp_cleartaocache()
{
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rolls over 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	     (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may
	 *	    need a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
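
	/*
	 * Illustrative numbers (not from the original source): with a
	 * long-term bw average of 1 MByte/sec and a smoothed rtt of
	 * 50 ms, bw * USERTT / (hz << TCP_RTT_SHIFT) works out to about
	 * 52 KBytes; the default inflight_stab of 20 with a 1460 byte
	 * t_maxseg then adds 2 * 1460 = 2920 bytes of slop, and the
	 * result is finally clamped to [inflight_min, inflight_max].
	 */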

#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			       tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}