/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr2.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#define	IPSEC
#endif

#include <sys/md5.h>
#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
/*
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
*/

#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	4

struct tcp_reass_pcpu {
	int			draining;
	struct netmsg_base	drain_nmsg;
} __cachealign;

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackq tcpcbackq[MAXCPU];
struct tcp_reass_pcpu tcp_reassq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

/*
 * NOTE: tcp_inflight_start is essentially the starting receive window
 *	 for a connection.  If set too low then fetches over tcp
 *	 connections will take noticeably longer to ramp-up over
 *	 high-latency connections.  6144 is too low for a default,
 *	 use something more reasonable.
 */
static int tcp_inflight_start = 33792;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_start, CTLFLAG_RW,
    &tcp_inflight_start, 0, "Start value for TCP inflight window");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Fudge bw 1/10% (50=5%)");

static int tcp_inflight_adjrtt = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_adjrtt, CTLFLAG_RW,
    &tcp_inflight_adjrtt, 0, "Slop for rtt 1/(hz*32)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");

int tcp_ncr_linklocal = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_linklocal, CTLFLAG_RW,
    &tcp_ncr_linklocal, 0,
    "Enable Non-Congestion Robustness (RFC 4653) on link local network");

int tcp_ncr_rxtthresh_max = 16;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_rxtthresh_max, CTLFLAG_RW,
    &tcp_ncr_rxtthresh_max, 0,
    "Non-Congestion Robustness (RFC 4653), DupThresh upper limit");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;
struct tcp_state_count tcpstate_count[MAXCPU] __cachealign;

static void	tcp_drain_dispatch(netmsg_t nmsg);

static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
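
/*
 * Illustrative note (not part of the original file): the handler above
 * emits one struct tcp_stats per effective netisr cpu, so a userland
 * consumer is expected to sum the per-cpu copies itself.  A minimal
 * sketch of such a reader, kept disabled here, assuming the tcp_stats
 * layout from <netinet/tcp_var.h>:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/tcp_var.h>
#include <stdio.h>
#include <stdlib.h>

static void
print_tcp_sndtotal(void)
{
	struct tcp_stats *sts;
	u_long sndtotal = 0;
	size_t len = 0, i, n;

	/* First call sizes the buffer: one struct tcp_stats per cpu. */
	if (sysctlbyname("net.inet.tcp.stats", NULL, &len, NULL, 0) < 0)
		return;
	if ((sts = malloc(len)) == NULL)
		return;
	if (sysctlbyname("net.inet.tcp.stats", sts, &len, NULL, 0) == 0) {
		n = len / sizeof(struct tcp_stats);
		for (i = 0; i < n; ++i)
			sndtotal += sts[i].tcps_sndtotal;
		printf("packets sent: %lu\n", sndtotal);
	}
	free(sts);
}
#endif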

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif
CTASSERT((TCBHASHSIZE & (TCBHASHSIZE - 1)) == 0);

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
	struct	netmsg_base inp_tp_sndmore;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbportinfo *portinfo;
	struct inpcbinfo *ticb;
	int hashsize = TCBHASHSIZE, portinfo_hsize;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
	    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)		/* if kern.hz is too low */
		tcp_rexmit_min = 1;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = TCBHASHSIZE; /* safe default */
	}
	tcp_tcbhashsize = hashsize;

	portinfo_hsize = 65536 / netisr_ncpus;
	if (portinfo_hsize > hashsize)
		portinfo_hsize = hashsize;

	portinfo = kmalloc_cachealign(sizeof(*portinfo) * netisr_ncpus, M_PCB,
	    M_WAITOK);

	for (cpu = 0; cpu < netisr_ncpus; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb, cpu, FALSE);
		ticb->hashbase = hashinit(hashsize, M_PCB,
					  &ticb->hashmask);
		in_pcbportinfo_init(&portinfo[cpu], portinfo_hsize, cpu);
		in_pcbportinfo_set(ticb, portinfo, netisr_ncpus);
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
						  &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
						  &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu].head);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	/*
	 * Initialize netmsgs for TCP drain
	 */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		netmsg_init(&tcp_reassq[cpu].drain_nmsg, NULL,
		    &netisr_adone_rport, MSGF_PRIORITY, tcp_drain_dispatch);
	}

	syncache_init();
	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu].head)) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu].head, tp, t_outputq);
		tcp_output(tp);
	}
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
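
/*
 * Illustrative usage (not part of the original file): the keepalive
 * timer is the expected consumer of the template functions above; the
 * pattern, roughly following tcp_timer.c, looks like the disabled
 * fragment below (tp is the connection's tcpcb).
 */
#if 0
	struct tcptemp *t_template;

	t_template = tcp_maketemplate(tp);
	if (t_template) {
		/* Probe the peer: ack rcv_nxt, offer an old sequence. */
		tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
			    NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
		tcp_freetemplate(t_template);
	}
#endif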

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	struct inpcb *inp = NULL;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		inp = tp->t_inpcb;
		if (!(flags & TH_RST)) {
			win = ssb_space(&inp->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &inp->in6p_route;
			else
				ro = &inp->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(inp,
		    (ro6 && ro6->ro_rt) ? ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL, inp);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		if (inp != NULL && (inp->inp_flags & INP_HASH))
			m_sethash(m, inp->inp_hashval);
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, inp);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
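
/*
 * Illustrative usage (not part of the original file): besides keepalives,
 * tcp_respond() is the workhorse behind the classic "drop with reset"
 * path; tcp_input.c uses it along the lines of the disabled fragment
 * below, reflecting the offending segment's own mbuf (m), TCP header (th)
 * and payload length (tlen) back at the sender.
 */
#if 0
	/* RST an unwanted segment; never send RST in response to a RST. */
	if (!(th->th_flags & TH_RST)) {
		tcp_respond(tp, mtod(m, void *), th, m,
			    th->th_seq + tlen, (tcp_seq)0, TH_RST | TH_ACK);
	}
#endif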

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
void
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;
	tcp_inittimers(tp);

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;

	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */
	TCP_STATE_INIT(tp);
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;
	tcp_output_init(tp);
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		TCP_STATE_CHANGE(tp, TCPS_CLOSED);
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

struct netmsg_listen_detach {
	struct netmsg_base	base;
	struct tcpcb		*nm_tp;
	struct tcpcb		*nm_tp_inh;
};

static void
tcp_listen_detach_handler(netmsg_t msg)
{
	struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
	struct tcpcb *tp = nmsg->nm_tp;
	int cpu = mycpuid, nextcpu;

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, nmsg->nm_tp_inh);
		tcp_pcbport_merge_oncpu(tp);
	}

	in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
	else
		lwkt_replymsg(&nmsg->base.lmsg, 0);
}
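
/*
 * Editorial note: the handler above is an instance of the recurring
 * DragonFly "cpu chain" netmsg pattern used throughout this file (see
 * also tcp_notifyall_oncpu below): netisr0 dispatches one message, each
 * protocol thread performs its per-cpu work and then forwards the same
 * message to the next cpu; the last cpu replies so the originating
 * lwkt_domsg() returns.  A skeletal form of the pattern, for reference:
 */
#if 0
static void
chained_handler_sketch(netmsg_t msg)
{
	int nextcpu;

	/* ... per-cpu work for this protocol thread ... */

	nextcpu = mycpuid + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->lmsg);
	else
		lwkt_replymsg(&msg->lmsg, 0);
}
#endif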

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance could only happen on the
		 * listen(2) sockets w/ SO_REUSEPORT set.
		 */
		ASSERT_NETISR0;
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function (syncache_destroy and in_pcbdetach).
	 *
	 * NOTE:
	 * After cleanup of the inp's hash and syncache entries, this inp
	 * will no longer be available to the rest of the protocol threads,
	 * so we are safe to whack the inp in the following code.
	 */
	if ((inp->inp_flags & INP_WILDCARD) && netisr_ncpus > 1) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		ASSERT_NETISR0;
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);
	}

	TCP_STATE_TERM(tp);

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu].head, tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			    sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe/2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat/2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		tcp_pcbport_merge_oncpu(tp);
		tcp_pcbport_destroy(tp);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread,
			 * i.e. netisr0.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}
	KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache is not destroyed"));

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * NOTE:
	 * - Remove self from listen tcpcb per-cpu port cache _before_
	 *   pcbdetach.
	 * - pcbdetach removes any wildcard hash entry on the current CPU.
	 */
	tcp_pcbport_remove(inp);
#ifdef INET6
	if (isipv6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}

/*
 * Walk the tcpcbs, if existing, and flush the reassembly queue,
 * if there is one...
 */
static void
tcp_drain_oncpu(struct inpcbinfo *pcbinfo)
{
	struct inpcbhead *head = &pcbinfo->pcblisthead;
	struct inpcb *inpb;

	/*
	 * Since we run in netisr, it is MP safe, even if
	 * we block during the inpcb list iteration, i.e.
	 * we don't need to use inpcb marker here.
	 */
	ASSERT_NETISR_NCPUS(pcbinfo->cpu);

	LIST_FOREACH(inpb, head, inp_list) {
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;

		tcpb = intotcpcb(inpb);
		KASSERT(tcpb != NULL, ("tcp_drain_oncpu: tcpb is NULL"));

		if ((te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		}
	}
}

static void
tcp_drain_dispatch(netmsg_t nmsg)
{
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit();

	tcp_drain_oncpu(&tcbinfo[mycpuid]);
	tcp_reassq[mycpuid].draining = 0;
}

static void
tcp_drain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &tcp_reassq[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

void
tcp_drain(void)
{
	cpumask_t mask;
	int cpu;

	if (!do_tcpdrain)
		return;

	if (tcp_reass_qsize == 0)
		return;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		tcp_drain_oncpu(&tcbinfo[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	if (tcp_reass_qsize < netisr_ncpus) {
		/* Not worth the trouble. */
		return;
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (tcp_reassq[cpu].draining) {
			/* Draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		tcp_reassq[cpu].draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, tcp_drain_ipi, NULL);
}
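
/*
 * Illustrative note (not part of the original file): tcp_drain() is
 * hooked into the protocol switch as the pr_drain callback, so it is
 * normally driven by the mbuf system when clusters run low.  The caller
 * side of that contract looks roughly like the disabled sketch below
 * (cf. m_reclaim() in kern/uipc_mbuf.c); details here are assumptions
 * for illustration only.
 */
#if 0
	struct domain *dp;
	struct protosw *pr;

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
#endif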

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < netisr_ncpus; ++ccpu)
			n += tcbinfo[ccpu].ipi_count;
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 0; ccpu < netisr_ncpus && error == 0; ++ccpu) {
		caddr_t inp_ppcb;
		struct xtcpcb xt;

		lwkt_migratecpu(ccpu);

		n = tcbinfo[ccpu].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_migratecpu(origcpu);
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct ucred cred0, *cred = NULL;
	struct inpcb *inp;
	int cpu, origcpu, error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);

	origcpu = mycpuid;
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);

	lwkt_migratecpu(cpu);

	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else if (inp->inp_socket->so_cred != NULL) {
		cred0 = *(inp->inp_socket->so_cred);
		cred = &cred0;
	}

	lwkt_migratecpu(origcpu);

	if (error)
		return (error);

	return SYSCTL_OUT(req, cred, sizeof(struct ucred));
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	inp = in6_pcblookup_hash(&tcbinfo[0],
	    &addrs[1].sin6_addr, addrs[1].sin6_port,
	    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
	    sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg_base	base;
	inp_notify_t		nm_notify;
	struct in_addr		nm_faddr;
	int			nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	ASSERT_NETISR_NCPUS(mycpuid);

	in_pcbnotifyall(&tcbinfo[mycpuid], nm->nm_faddr,
	    nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}

inp_notify_t
tcp_get_inpnotify(int cmd, const struct sockaddr *sa,
    int *arg, struct ip **ip0, int *cpuid)
{
	struct ip *ip = *ip0;
	struct in_addr faddr;
	inp_notify_t notify = tcp_notify;

	faddr = ((const struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return NULL;

	*arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
	    (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	     cmd == PRC_UNREACH_PORT ||
	     cmd == PRC_TIMXCEED_INTRANS) &&
	    ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		const struct icmp *icmp = (const struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		*arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return NULL;
	}

	if (cpuid != NULL) {
		if (ip == NULL) {
			/* Go through all effective netisr CPUs. */
			*cpuid = netisr_ncpus;
		} else {
			const struct tcphdr *th;

			th = (const struct tcphdr *)
			    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
			*cpuid = tcp_addrcpu(faddr.s_addr, th->th_dport,
			    ip->ip_src.s_addr, th->th_sport);
		}
	}

	*ip0 = ip;
	return notify;
}

void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct in_addr faddr;
	inp_notify_t notify;
	int arg, cpuid;

	ASSERT_NETISR_NCPUS(mycpuid);

	notify = tcp_get_inpnotify(cmd, sa, &arg, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip != NULL) {
		const struct tcphdr *th;
		struct inpcb *inp;

		if (cpuid != mycpuid)
			goto done;

		th = (const struct tcphdr *)
		    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_seq icmpseq = htonl(th->th_seq);
			struct tcpcb *tp = intotcpcb(inp);

			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				notify(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
	} else if (msg->ctlinput.nm_direct) {
		if (cpuid != netisr_ncpus && cpuid != mycpuid)
			goto done;

		in_pcbnotifyall(&tcbinfo[mycpuid], faddr, arg, notify);
	} else {
		struct netmsg_tcp_notify *nm;

		ASSERT_NETISR0;
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
			    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}

#ifdef INET6

void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	inp_notify_t notify = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0], sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0], sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}

#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define	ISN_BYTES_PER_SECOND 1048576
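
/*
 * Worked check of the numbers above (editorial note): advancing the
 * 32-bit sequence space at ISN_BYTES_PER_SECOND means it wraps after
 * 2^32 / 2^20 = 4096 seconds, roughly 68 minutes -- the "over an hour
 * before rollover" cited in the comment.  Per clock tick the ISN
 * advances by ISN_BYTES_PER_SECOND / hz, e.g. 10485 with hz = 100.
 */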

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (INP_ISIPV6(tp->t_inpcb)) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_quench: tp is NULL"));
	tp->snd_cwnd = tp->t_maxseg;
	tp->snd_wacked = 0;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_drop_syn_sent: tp is NULL"));
	if (tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	KASSERT(tp != NULL, ("tcp_mtudisc: tp is NULL"));

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
			   (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif
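
/*
 * Illustrative usage (not part of the original file): callers such as
 * tcp_mss() use the lookups above to consult the cached route metrics
 * for the peer.  A minimal sketch, assuming the rt_metrics fields from
 * <net/route.h>:
 */
#if 0
static u_long
peer_path_mtu_sketch(struct inpcb *inp)
{
	struct rtentry *rt;

	rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL)
		return (0);		/* no route; caller falls back */
	return (rt->rt_rmx.rmx_mtu);	/* 0 when the metric is unset */
}
#endif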
/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and one cannot be allocated, return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by
 * tcp_mss() to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif
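/*
 * Usage sketch (not compiled): callers consult the returned rtentry for
 * cached path attributes, much as tcp_mtudisc() does above, e.g.:
 */
#if 0
	struct rtentry *rt;

	rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL && rt->rt_rmx.rmx_mtu != 0)
		mtu = rt->rt_rmx.rmx_mtu;  /* previously learned path MTU */
#endif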
#ifdef IPSEC
/* Compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th, FALSE);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th, FALSE);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow
 * out all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to
 * occur, then backing off, then steadily increasing the window again until
 * another failure occurs, ad infinitum.  This results in terrible
 * oscillation that is only made worse as network loads increase and the
 * idea of intentionally blowing out network buffers is, frankly, a
 * terrible way to manage network resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long ibw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	cpu_ccfence();
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = save_ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_start;
		return;
	}

	/*
	 * A delta of at least 1 tick is required.  Waiting 2 ticks will
	 * result in better (bw) accuracy.  More than that and the ramp-up
	 * will be too slow.
	 */
	if (delta_ticks == 0 || delta_ticks == 1)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	ibw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + ibw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * At very high speeds the bw calculation can become overly sensitive
	 * and error prone when delta_ticks is low (e.g. usually 1).  To deal
	 * with the problem the stab must be scaled to the bw.  A stab of 50
	 * (the default) increases the bw for the purposes of the bwnd
	 * calculation by 5%.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
#define USERTT	((tp->t_srtt + tp->t_rttvar) + tcp_inflight_adjrtt)
	bw += bw * tcp_inflight_stab / 1000;
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    (int)tp->t_maxseg * 2;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;

		if ((u_int)(save_ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = save_ticks;
			kprintf("%p ibw %ld bw %ld rttvar %d srtt %d "
				"bwnd %ld delta %d snd_win %ld\n",
				tp, ibw, bw, tp->t_rttvar, tp->t_srtt,
				bwnd, delta_ticks, tp->snd_wnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
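/*
 * Worked example of the computation above (a sketch with assumed
 * numbers): with hz = 100, suppose 14600 bytes are acked across
 * delta_ticks = 2, giving ibw = 14600 * 100 / 2 = 730000 bytes/sec.
 * The long term average folds that in at 1/16 weight:
 * bw = (bw * 15 + ibw) / 16.  With a stab of 50 the bw used for the
 * window is inflated by 5% (766500 if bw settles at 730000), and for
 * an effective RTT of 40ms (USERTT equivalent to 4 ticks) with
 * t_maxseg = 1460 the resulting window is roughly
 * 766500 * 0.040 + 2 * 1460 = ~33580 bytes.
 */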
static void
tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
{
	struct rtentry *rt;
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/* XXX */
	if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
		tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
	if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
		tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL ||
	    rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
	    rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
		*maxsegs = tcp_iw_maxsegs;
		*capsegs = tcp_iw_capsegs;
		return;
	}
	*maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
	*capsegs = rt->rt_rmx.rmx_iwcapsegs;
}

u_long
tcp_initial_window(struct tcpcb *tp)
{
	if (tcp_do_rfc3390) {
		/*
		 * RFC3390:
		 * "If the SYN or SYN/ACK is lost, the initial window
		 *  used by a sender after a correctly transmitted SYN
		 *  MUST be one segment consisting of MSS bytes."
		 *
		 * However, we do something a little bit more aggressive
		 * than RFC3390 here:
		 * - Only if the time spent in SYN or SYN|ACK retransmission
		 *   is >= 3 seconds is the IW reduced.  We do this mainly
		 *   because when RFC3390 was published, the initial RTO was
		 *   still 3 seconds (the threshold we test here), while
		 *   after RFC6298, the initial RTO is 1 second.  This
		 *   behaviour probably still falls within the spirit of
		 *   RFC3390.
		 * - When the IW is reduced, 2*MSS is used instead of 1*MSS,
		 *   mainly to avoid a sender/receiver deadlock until the
		 *   delayed ACK timer expires.  Even RFC2581 does not try
		 *   to reduce the IW upon SYN or SYN|ACK retransmission
		 *   timeout.
		 *
		 * See also:
		 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
		 */
		if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
			return (2 * tp->t_maxseg);
		} else {
			u_long maxsegs, capsegs;

			tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
			return min(maxsegs * tp->t_maxseg,
			    max(2 * tp->t_maxseg, capsegs * 1460));
		}
	} else {
		/*
		 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
		 *
		 * Mainly to avoid sender and receiver deadlock
		 * until delayed ACK timer expires.
		 */
		return (2 * tp->t_maxseg);
	}
}
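/*
 * Example of the RFC3390 branch above (a sketch with assumed numbers):
 * with the defaults maxsegs = capsegs = 4 and mss = 1460, the initial
 * window is min(4 * 1460, max(2 * 1460, 4 * 1460)) = 5840 bytes, i.e.
 * 4 full segments.  For a jumbo mss = 8960 the byte cap dominates:
 * min(4 * 8960, max(2 * 8960, 4 * 1460)) = 17920 bytes, i.e. only 2
 * segments, since capsegs caps the window in units of 1460-byte
 * segments.
 */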
#ifdef TCP_SIGNATURE
/*
 * Compute the TCP-MD5 hash of a TCP segment.  (RFC2385)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise an error code.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows, but it is unstable.
 */
int
tcpsignature_compute(
	struct mbuf *m,		/* mbuf chain */
	int len,		/* length of TCP data */
	int optlen,		/* length of TCP options */
	u_char *buf,		/* storage for MD5 digest */
	u_int direction)	/* direction of flow */
{
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6;
	struct in6_addr in6;
	uint32_t plen;
	uint16_t nhdr;
#endif /* INET6 */
	u_short savecsum;

	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));
	/*
	 * Extract the destination from the IP header in the mbuf.
	 */
	ip = mtod(m, struct ip *);
#ifdef INET6
	ip6 = NULL;	/* Make the compiler happy. */
#endif /* INET6 */
	/*
	 * Look up an SADB entry which matches the address found in
	 * the segment.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src,
		    (caddr_t)&ip->ip_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#ifdef INET6
	case (IPV6_VERSION >> 4):
		ip6 = mtod(m, struct ip6_hdr *);
		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src,
		    (caddr_t)&ip6->ip6_dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#endif /* INET6 */
	default:
		return (EINVAL);
		/* NOTREACHED */
		break;
	}
	if (sav == NULL) {
		kprintf("%s: SADB lookup failed\n", __func__);
		return (EINVAL);
	}
	MD5Init(&ctx);
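	/*
	 * Overview of the digest that follows (descriptive note; RFC 2385
	 * section 2): the hash covers, in order,
	 *
	 *	1. the IP pseudo-header (source and destination addresses,
	 *	   zero pad, protocol, segment length),
	 *	2. the TCP header with th_sum temporarily zeroed,
	 *	3. the TCP segment data, and
	 *	4. the connection's shared key from the SADB.
	 *
	 * TCP options are deliberately excluded from the hash.
	 */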
	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		ipovly = (struct ipovly *)ip;
		ippseudo.ippseudo_src = ipovly->ih_src;
		ippseudo.ippseudo_dst = ipovly->ih_dst;
		ippseudo.ippseudo_pad = 0;
		ippseudo.ippseudo_p = IPPROTO_TCP;
		ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) +
		    optlen);
		MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
		th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
		doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
		break;
#ifdef INET6
	/*
	 * RFC 2385, 2.0  Proposal
	 * For IPv6, the pseudo-header is as described in RFC 2460, namely
	 * the 128-bit source IPv6 address, 128-bit destination IPv6 address,
	 * zero-extended next header value (to form 32 bits), and 32-bit
	 * segment length.
	 * Note: Upper-Layer Packet Length comes before Next Header.
	 */
	case (IPV6_VERSION >> 4):
		in6 = ip6->ip6_src;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		in6 = ip6->ip6_dst;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		plen = htonl(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
		nhdr = 0;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		nhdr = IPPROTO_TCP;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
		doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
		break;
#endif /* INET6 */
	default:
		return (EINVAL);
		/* NOTREACHED */
		break;
	}
	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;
	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 * Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcpsignature_apply, &ctx);
	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);
	key_sa_recordxfer(sav, m);
	key_freesav(sav);
	return (0);
}

int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{
	MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
	return (0);
}
#endif /* TCP_SIGNATURE */
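/*
 * Verification sketch (not compiled, hypothetical caller): an input
 * path that has located an MD5 signature option would recompute the
 * digest and compare it against the option payload, along the lines of:
 */
#if 0
	u_char digest[TCP_SIGLEN];

	if (tcpsignature_compute(m, tlen, optlen, digest,
	    IPSEC_DIR_INBOUND) == 0 &&
	    bcmp(digest, sigopt_data, TCP_SIGLEN) == 0) {
		/* signature verified; accept the segment */
	}
#endif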
static void
tcp_drop_sysctl_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage *addrs = lmsg->u.ms_resultp;
	int error;
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
	struct in6_addr f6, l6;
#endif
	struct inpcb *inp;

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		error = in6_embedscope(&f6, fin6, NULL, NULL);
		if (error)
			goto done;
		error = in6_embedscope(&l6, lin6, NULL, NULL);
		if (error)
			goto done;
		inp = in6_pcblookup_hash(&tcbinfo[mycpuid], &f6,
		    fin6->sin6_port, &l6, lin6->sin6_port, FALSE, NULL);
		break;
#endif
#ifdef INET
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], fin->sin_addr,
		    fin->sin_port, lin->sin_addr, lin->sin_port, FALSE, NULL);
		break;
#endif
	default:
		/*
		 * Must not reach here, since the address family was
		 * checked in the sysctl handler.
		 */
		panic("unknown address family %d", addrs[0].ss_family);
	}
	if (inp != NULL) {
		struct tcpcb *tp = intotcpcb(inp);

		KASSERT((inp->inp_flags & INP_WILDCARD) == 0,
		    ("in wildcard hash"));
		KASSERT(tp != NULL, ("tcp_drop_sysctl_dispatch: tp is NULL"));
		KASSERT((tp->t_flags & TF_LISTEN) == 0, ("listen socket"));
		tcp_drop(tp, ECONNABORTED);
		error = 0;
	} else {
		error = ESRCH;
	}
#ifdef INET6
done:
#endif
	lwkt_replymsg(lmsg, error);
}
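/*
 * Userland usage sketch (not compiled): the net.inet.tcp.drop sysctl
 * defined below expects two struct sockaddr_storage entries, foreign
 * address first, local address second:
 */
#if 0
	struct sockaddr_storage addrs[2];

	/*
	 * ... fill addrs[0] (foreign) and addrs[1] (local) with the
	 * address/port pair of the connection to abort ...
	 */
	if (sysctlbyname("net.inet.tcp.drop", NULL, NULL,
	    addrs, sizeof(addrs)) < 0)
		warn("sysctlbyname");
#endif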
static int
sysctl_tcp_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
#endif
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;
	struct lwkt_port *port = NULL;
	int error;

	fin = lin = NULL;
#ifdef INET6
	fin6 = lin6 = NULL;
#endif
	error = 0;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error)
		return (error);

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr) ||
		    IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
			return (EADDRNOTAVAIL);
#if 0
		error = sa6_embedscope(fin6, V_ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, V_ip6_use_defzone);
		if (error)
			return (error);
#endif
		port = tcp6_addrport();
		break;
#endif
#ifdef INET
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		port = tcp_addrport(fin->sin_addr.s_addr, fin->sin_port,
		    lin->sin_addr.s_addr, lin->sin_port);
		break;
#endif
	default:
		return (EINVAL);
	}

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    tcp_drop_sysctl_dispatch);
	lmsg->u.ms_resultp = addrs;
	return lwkt_domsg(port, lmsg, 0);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, drop,
    CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
    0, sysctl_tcp_drop, "", "Drop TCP connection");

static int
sysctl_tcps_count(SYSCTL_HANDLER_ARGS)
{
	u_long state_count[TCP_NSTATES];
	int cpu;

	memset(state_count, 0, sizeof(state_count));
	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		int i;

		for (i = 0; i < TCP_NSTATES; ++i)
			state_count[i] += tcpstate_count[cpu].tcps_count[i];
	}

	return sysctl_handle_opaque(oidp, state_count, sizeof(state_count),
	    req);
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, state_count,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    sysctl_tcps_count, "LU", "TCP connection counts by state");

void
tcp_pcbport_create(struct tcpcb *tp)
{
	int cpu;

	KASSERT((tp->t_flags & TF_LISTEN) && tp->t_state == TCPS_LISTEN,
	    ("not a listen tcpcb"));

	KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache was created"));
	tp->t_pcbport = kmalloc_cachealign(
	    sizeof(struct tcp_pcbport) * netisr_ncpus, M_PCB, M_WAITOK);

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		struct inpcbport *phd;

		phd = &tp->t_pcbport[cpu].t_phd;
		LIST_INIT(&phd->phd_pcblist);
		/* Though, not used ... */
		phd->phd_port = tp->t_inpcb->inp_lport;
	}
}

void
tcp_pcbport_merge_oncpu(struct tcpcb *tp)
{
	struct inpcbport *phd;
	struct inpcb *inp;
	int cpu = mycpuid;

	KASSERT(cpu < netisr_ncpus, ("invalid cpu%d", cpu));
	phd = &tp->t_pcbport[cpu].t_phd;

	while ((inp = LIST_FIRST(&phd->phd_pcblist)) != NULL) {
		KASSERT(inp->inp_phd == phd && inp->inp_porthash == NULL,
		    ("not on tcpcb port cache"));
		LIST_REMOVE(inp, inp_portlist);
		in_pcbinsporthash_lport(inp);
		KASSERT(inp->inp_phd == tp->t_inpcb->inp_phd &&
		    inp->inp_porthash == tp->t_inpcb->inp_porthash,
		    ("tcpcb port cache merge failed"));
	}
}

void
tcp_pcbport_destroy(struct tcpcb *tp)
{
#ifdef INVARIANTS
	int cpu;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		KASSERT(LIST_EMPTY(&tp->t_pcbport[cpu].t_phd.phd_pcblist),
		    ("tcpcb port cache is not empty"));
	}
#endif
	kfree(tp->t_pcbport, M_PCB);
	tp->t_pcbport = NULL;
}
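/*
 * Userland usage sketch (not compiled): reading the per-state totals
 * exported by sysctl_tcps_count() above:
 */
#if 0
	u_long counts[TCP_NSTATES];
	size_t len = sizeof(counts);

	if (sysctlbyname("net.inet.tcp.state_count", counts, &len,
	    NULL, 0) == 0)
		printf("established: %lu\n", counts[TCPS_ESTABLISHED]);
#endif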