/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.57 2007/04/22 01:13:14 dillon Exp $
 */
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>
#include <sys/msgport2.h>
#include <machine/smp.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

/* XXX JH */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo[0].ipi_count, 0, "Number of active PCBs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_cleartaocache (void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU];
#ifdef SMP
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
#else
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcp_stats, "TCP statistics");
#endif
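/*
 * Editorial note: on SMP kernels the "stats" sysctl above exports one
 * struct tcp_stats per cpu, back to back, and the reader is expected to
 * sum the per-cpu counters itself.  A hypothetical userland consumer
 * (names and the single summed field are illustrative only):
 *
 *	struct tcp_stats st[MAXCPU];
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.inet.tcp.stats", st, &len, NULL, 0) == 0) {
 *		int i, n = len / sizeof(st[0]);
 *		for (i = 1; i < n; ++i)
 *			st[0].tcps_sndtotal += st[i].tcps_sndtotal;
 *	}
 *
 * A real consumer would fold every counter, not just tcps_sndtotal.
 */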
/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	callout inp_tp_rexmt, inp_tp_persist, inp_tp_keep, inp_tp_2msl;
	struct	callout inp_tp_delack;
};
#undef ALIGNMENT
#undef ALIGNM1
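/*
 * Editorial note on the union above: the align[] arm rounds the inpcb
 * up to the next multiple of ALIGNMENT (32 bytes).  For instance, if
 * sizeof(struct inpcb) were 460, then (460 + 31) & ~31 == 480, so the
 * tcb member always starts on a 32-byte boundary no matter how the
 * inpcb grows.
 */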
/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbporthead *porthashbase;
	u_long porthashmask;
	struct vm_zone *ipi_zone;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		    25, -1, 0, NULL);

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
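	/*
	 * Editorial example: the hash table size can be overridden at
	 * boot with the tunable consumed by TUNABLE_INT_FETCH() above,
	 * e.g. in loader.conf:
	 *
	 *	net.inet.tcp.tcbhashsize="1024"
	 *
	 * Values that are not a power of 2 are rejected and fall back
	 * to the safe default of 512.
	 */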
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);
	ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
			 ZONE_INTERRUPT, 0);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		in_pcbinfo_init(&tcbinfo[cpu]);
		tcbinfo[cpu].cpu = cpu;
		tcbinfo[cpu].hashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].hashmask);
		tcbinfo[cpu].porthashbase = porthashbase;
		tcbinfo[cpu].porthashmask = porthashmask;
		tcbinfo[cpu].wildcardhashbase = hashinit(hashsize, M_PCB,
		    &tcbinfo[cpu].wildcardhashmask);
		tcbinfo[cpu].ipi_zone = ipi_zone;
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define	TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define	TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus; ++cpu) {
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));
	}
#else
	bzero(&tcpstat, sizeof(struct tcp_stats));
#endif

	syncache_init();
	tcp_sack_init();
	tcp_thread_init();
}

void
tcpmsg_service_loop(void *dummy)
{
	struct netmsg *msg;

	while ((msg = lwkt_waitport(&curthread->td_msgport, NULL))) {
		do {
			logtcp(rxmsg);
			msg->nm_lmsg.ms_cmd.cm_func(&msg->nm_lmsg);
		} while ((msg = lwkt_getport(&curthread->td_msgport)) != NULL);
		logtcp(delayed);
		tcp_willblock();
		logtcp(wait);
	}
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}


/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}
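/*
 * Editorial note: for the IPv4 case above, in_pseudo() seeds th_sum
 * with the checksum of the pseudo-header (source address, destination
 * address, and length + protocol); whichever path later finishes the
 * checksum, software or hardware, then only has to fold in the TCP
 * header and payload.  With no options and no payload the final
 * argument works out to htons(20 + 6), i.e. sizeof(struct tcphdr)
 * plus IPPROTO_TCP.
 */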
/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	int win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = ssb_space(&tp->t_inpcb->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
			ro = &tp->t_inpcb->inp_route;
	} else {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
						ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
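	/*
	 * Editorial note: CSUM_TCP plus csum_data =
	 * offsetof(struct tcphdr, th_sum) marks th_sum as holding only
	 * the pseudo-header sum and records where the final checksum
	 * must be stored; if the outgoing interface cannot offload the
	 * computation, the stack completes it in software before the
	 * packet is transmitted.
	 */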
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
			   tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &it->inp_tp_rexmt);
	callout_init(tp->tt_persist = &it->inp_tp_persist);
	callout_init(tp->tt_keep = &it->inp_tp_keep);
	callout_init(tp->tt_2msl = &it->inp_tp_2msl);
	callout_init(tp->tt_delack = &it->inp_tp_delack);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
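	/*
	 * Editorial arithmetic: with the stock constants
	 * (TCPTV_SRTTBASE == 0 and TCPTV_RTOBASE == 3 * hz, assuming
	 * unmodified headers), the t_rttvar chosen above makes
	 * srtt + 4 * rttvar equal 3 * hz ticks, i.e. a 3 second initial
	 * retransmit timeout matching t_rxtcur, until real RTT samples
	 * arrive.
	 */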
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

#ifdef SMP

struct netmsg_remwildcard {
	struct lwkt_msg		nm_lmsg;
	struct inpcb		*nm_inp;
	struct inpcbinfo	*nm_pcbinfo;
#if defined(INET6)
	int			nm_isinet6;
#else
	int			nm_unused01;
#endif
};

/*
 * Wildcard inpcb's on SMP boxes must be removed from all cpus before the
 * inp can be detached.  We do this by cycling through the cpus, ending up
 * on the cpu controlling the inp last and then doing the disconnect.
 */
static int
in_pcbremwildcardhash_handler(struct lwkt_msg *msg0)
{
	struct netmsg_remwildcard *msg = (struct netmsg_remwildcard *)msg0;
	int cpu;

	cpu = msg->nm_pcbinfo->cpu;

	if (cpu == msg->nm_inp->inp_pcbinfo->cpu) {
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (msg->nm_isinet6)
			in6_pcbdetach(msg->nm_inp);
		else
#endif
			in_pcbdetach(msg->nm_inp);
		lwkt_replymsg(&msg->nm_lmsg, 0);
	} else {
		in_pcbremwildcardhash_oncpu(msg->nm_inp, msg->nm_pcbinfo);
		cpu = (cpu + 1) % ncpus2;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_forwardmsg(tcp_cport(cpu), &msg->nm_lmsg);
	}
	return (EASYNC);
}

#endif

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef SMP
	int cpu;
#endif
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/*
	 * The tp is not instantly destroyed in the wildcard case.  Setting
	 * the state to TCPS_TERMINATING will prevent the TCP stack from
	 * messing with it, though it should be noted that this change may
	 * not take effect on other cpus until we have chained the wildcard
	 * hash removal.
	 *
	 * XXX we currently depend on the BGL to synchronize the tp->t_state
	 * update and prevent other tcp protocol threads from accepting new
	 * connections on the listen socket we might be trying to close down.
	 */
	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;
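		/*
		 * Editorial worked example: t_srtt is kept in units of
		 * 1/(hz * TCP_RTT_SCALE) seconds while rmx_rtt uses
		 * RTM_RTTUNIT (microseconds).  Assuming hz = 100,
		 * TCP_RTT_SCALE = 32 and RTM_RTTUNIT = 1000000, a 100 ms
		 * smoothed RTT is held in t_srtt as 320, and
		 * 320 * (1000000 / (100 * 32)) == 99840 usec is what gets
		 * filtered into rmx_rtt below.
		 */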
		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
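	/*
	 * Editorial worked example for the ssthresh conversion above:
	 * with snd_ssthresh = 32768 user bytes and t_maxseg = 1460,
	 * (32768 + 730) / 1460 = 22 packets, and 22 * (1460 + 40) = 33000
	 * bytes of packet data (40 being the IPv4 tcpiphdr), which is the
	 * figure averaged into rmx_ssthresh.
	 */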
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_cleanup(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/*
	 * Discard the inp.  In the SMP case a wildcard inp's hash (created
	 * by a listen socket or an INADDR_ANY udp socket) is replicated
	 * for each protocol thread and must be removed in the context of
	 * that thread.  This is accomplished by chaining the message
	 * through the cpus.
	 *
	 * If the inp is not wildcarded we simply detach, which will remove
	 * any hashes still present for this inp.
	 */
#ifdef SMP
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_remwildcard *msg;

		cpu = (inp->inp_pcbinfo->cpu + 1) % ncpus2;
		msg = kmalloc(sizeof(struct netmsg_remwildcard),
			      M_LWKTMSG, M_INTWAIT);
		lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
			     lwkt_cmd_func(in_pcbremwildcardhash_handler),
			     lwkt_cmd_op_none);
#ifdef INET6
		msg->nm_isinet6 = isafinet6;
#endif
		msg->nm_inp = inp;
		msg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_sendmsg(tcp_cport(cpu), &msg->nm_lmsg);
	} else
#endif
	{
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (isafinet6)
			in6_pcbdetach(inp);
		else
#endif
			in_pcbdetach(inp);
	}
	tcpstat.tcps_closed++;
	return (NULL);
}

static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	LIST_FOREACH(inpb, head, inp_list) {
		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;
		if ((tcpb = intotcpcb(inpb))) {
			while ((te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
				LIST_REMOVE(te, tqe_q);
				m_freem(te->tqe_m);
				FREE(te, M_TSEGQ);
				tcp_reass_qsize--;
			}
		}
	}
}

#ifdef SMP
struct netmsg_tcp_drain {
	struct lwkt_msg		nm_lmsg;
	struct inpcbhead	*nm_head;
};

static int
tcp_drain_handler(lwkt_msg_t lmsg)
{
	struct netmsg_tcp_drain *nm = (void *)lmsg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(lmsg, 0);
	return (EASYNC);
}
#endif
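/*
 * Editorial note: a cpu's PCB list may only be safely walked from that
 * cpu's protocol thread, so tcp_drain() below drains the local list
 * directly and forwards a tcp_drain_handler message to every other cpu
 * instead of touching their lists from here.
 */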
void
tcp_drain(void)
{
#ifdef SMP
	int cpu;
#endif

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *msg;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			msg = kmalloc(sizeof(struct netmsg_tcp_drain),
				      M_LWKTMSG, M_NOWAIT);
			if (msg == NULL)
				continue;
			lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
				     lwkt_cmd_func(tcp_drain_handler),
				     lwkt_cmd_op_none);
			msg->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(tcp_cport(cpu), &msg->nm_lmsg);
		}
	}
#else
	tcp_drain_oncpu(&tcbinfo[0].pcblisthead);
#endif
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
		   tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	inp_gen_t gencnt;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & (1 << cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		gencnt = tcbinfo[cpu_id].ipi_gencnt;
		n = tcbinfo[cpu_id].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (inp->inp_gencnt > gencnt)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
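/*
 * Editorial usage note: this sysctl is what utilities in the mold of
 * netstat(1) read, e.g. via
 * sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0), obtaining
 * an array of struct xtcpcb entries.
 */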
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error;

	error = suser(req->td);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = suser(req->td);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}
	crit_enter();
	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
	    0, 0,
	    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmpseq;
	int arg, cpu;

	if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
		   (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		    cmd == PRC_UNREACH_PORT ||
		    cmd == PRC_TIMXCEED_INTRANS) &&
		   ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		struct icmp *icmp = (struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
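		/*
		 * Editorial note: "ip" points at the copy of the
		 * offending IP header embedded inside the ICMP message,
		 * so stepping back offsetof(struct icmp, icmp_ip) bytes
		 * recovers the enclosing struct icmp, from which
		 * icmp_nextmtu can be read.
		 */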
		arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	}

	if (ip != NULL) {
		crit_enter();
		th = (struct tcphdr *)((caddr_t)ip +
				       (IP_VHL_HL(ip->ip_vhl) << 2));
		cpu = tcp_addrcpu(faddr.s_addr, th->th_dport,
				  ip->ip_src.s_addr, th->th_sport);
		inp = in_pcblookup_hash(&tcbinfo[cpu], faddr, th->th_dport,
					ip->ip_src, th->th_sport, 0, NULL);
		if ((inp != NULL) && (inp->inp_socket != NULL)) {
			icmpseq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				(*notify)(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		crit_exit();
	} else {
		for (cpu = 0; cpu < ncpus2; cpu++) {
			in_pcbnotifyall(&tcbinfo[cpu].pcblisthead, faddr, arg,
					notify);
		}
	}
}

#ifdef INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		   ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			return;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
			      (struct sockaddr *)ip6cp->ip6c_src,
			      th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
			      (const struct sockaddr *)sa6_src,
			      0, cmd, arg, notify);
}
#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */
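/*
 * Editorial sketch of the computation performed by tcp_new_isn()
 * below, using the names from the code:
 *
 *	ISN = low 32 bits of MD5(fport, lport, faddr, laddr, isn_secret)
 *	      + ticks * (ISN_BYTES_PER_SECOND / hz)
 *
 * With hz = 100 the additive term grows by 10485 per tick, roughly one
 * megabyte per second, keeping ISNs monotonic per connection tuple
 * while remaining unguessable to third parties.
 */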
#define	ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL) {
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_wacked = 0;
	}
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp == NULL)
		return;

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}
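	/*
	 * Editorial example: RFC 1191 defines a table of common MTU
	 * plateaus and ip_next_mtu() steps down it, so a path that was
	 * assumed to carry a 1500 byte MTU would typically be retried
	 * at the next plateau, 1492.  (Illustrative; see ip_icmp.c for
	 * the actual table.)
	 */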
	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		struct rmxp_tao *taop = rmx_taop(rt->rt_rmx);

		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (taop->tao_mssopt != 0 && taop->tao_mssopt < maxopd)
			maxopd = taop->tao_mssopt;
	} else
		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
			   (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	if ((tp->t_flags & (TF_REQ_CC | TF_RCVD_CC | TF_NOOPT)) ==
			   (TF_REQ_CC | TF_RCVD_CC))
		mss -= TCPOLEN_CC_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif
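	/*
	 * Editorial worked example: with MCLBYTES = 2048 and mss = 3000,
	 * the mask above yields 3000 & ~2047 = 2048, so large segments
	 * fill mbuf clusters exactly rather than leaving a ragged tail.
	 */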
	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and one cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(struct in_conninfo *inc)
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif
		rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST))
		return (NULL);

	return (rmx_taop(rt->rt_rmx));
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the routing
 * tables are initialized at the same time as TCP, so there is nothing in
 * the cache left over.
 */
static void
tcp_cleartaocache(void)
{
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar (but not meant to be identical) to TCP/Vegas.  The code operates
 * on the transmitter side of a TCP connection and so only affects the
 * transmit side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
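	/*
	 * Editorial note: the line above is an exponential moving
	 * average, new = (15 * old + sample) / 16, so roughly sixteen
	 * ack-spaced samples are blended together and a single noisy
	 * measurement only moves the estimate by 1/16 of the error.
	 */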
	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT
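	/*
	 * Editorial worked example: for bw = 1250000 bytes/sec (10 Mbps),
	 * a 50 ms smoothed RTT (t_srtt == t_rttbest == 160 scaled units
	 * at hz = 100), t_maxseg = 1460 and the default tcp_inflight_stab
	 * of 20:
	 *
	 *	bwnd = 1250000 * 160 / (100 << 5) + 20 * 1460 / 10
	 *	     = 62500 + 2920 = 65420 bytes
	 *
	 * i.e. the pipe's bandwidth-delay product plus two segments of
	 * slop.
	 */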
	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}