/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr2.h>

#define	_IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#include <sys/md5.h>
#include <machine/smp.h>

#include <sys/msgport2.h>
#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
/*
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
*/

#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	4

struct tcp_reass_pcpu {
	int			draining;
	struct netmsg_base	drain_nmsg;
} __cachealign;

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackq tcpcbackq[MAXCPU];
struct tcp_reass_pcpu tcp_reassq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
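 *
 * (Rough illustration, numbers not from the original comment: moving
 * 1 Mbyte with an MSS of 20 takes about 52,000 packets, versus about
 * 700 packets at an MSS of 1460, so per-packet header and processing
 * overhead amplifies the attacker's leverage by nearly two orders of
 * magnitude.)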
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

/*
 * Recommend 20 (6 reseeds in two minutes)
 *
 * Lower values may cause the sequence space to cycle too quickly and lose
 * its signed monotonically-increasing nature within the 2-minute TIMEDWAIT
 * window.
 */
static int tcp_isn_reseed_interval = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

/*
 * NOTE: tcp_inflight_start is essentially the starting receive window
 *	 for a connection.  If set too low then fetches over tcp
 *	 connections will take noticeably longer to ramp-up over
 *	 high-latency connections.  6144 is too low for a default,
 *	 use something more reasonable.
 */
static int tcp_inflight_start = 33792;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_start, CTLFLAG_RW,
    &tcp_inflight_start, 0, "Start value for TCP inflight window");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Fudge bw 1/10% (50=5%)");

static int tcp_inflight_adjrtt = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_adjrtt, CTLFLAG_RW,
    &tcp_inflight_adjrtt, 0, "Slop for rtt 1/(hz*32)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");

int tcp_ncr_linklocal = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_linklocal, CTLFLAG_RW,
    &tcp_ncr_linklocal, 0,
    "Enable Non-Congestion Robustness (RFC 4653) on link local network");

int tcp_ncr_rxtthresh_max = 16;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_rxtthresh_max, CTLFLAG_RW,
    &tcp_ncr_rxtthresh_max, 0,
    "Non-Congestion Robustness (RFC 4653), DupThresh upper limit");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify(struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;
struct tcp_state_count tcpstate_count[MAXCPU] __cachealign;

static void	tcp_drain_dispatch(netmsg_t nmsg);

static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	512
#endif
CTASSERT(powerof2(TCBHASHSIZE));

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.
 * Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
	struct	netmsg_base inp_tp_sndmore;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * TCP initialization.
 */
void
tcp_init(void)
{
	struct inpcbportinfo *portinfo;
	struct inpcbinfo *ticb;
	int hashsize = TCBHASHSIZE, portinfo_hsize;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
	    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	if (tcp_rexmit_min < 1)		/* if kern.hz is too low */
		tcp_rexmit_min = 1;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = TCBHASHSIZE;	/* safe default */
	}
	tcp_tcbhashsize = hashsize;

	portinfo_hsize = 65536 / netisr_ncpus;
	if (portinfo_hsize > hashsize)
		portinfo_hsize = hashsize;

	portinfo = kmalloc(sizeof(*portinfo) * netisr_ncpus, M_PCB,
	    M_WAITOK | M_CACHEALIGN);

	for (cpu = 0; cpu < netisr_ncpus; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb, cpu, FALSE);
		ticb->hashbase = hashinit(hashsize, M_PCB,
		    &ticb->hashmask);
		in_pcbportinfo_init(&portinfo[cpu], portinfo_hsize, cpu);
		in_pcbportinfo_set(ticb, portinfo, netisr_ncpus);
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
		    &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
		    &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu].head);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	/*
	 * Initialize netmsgs for TCP drain
	 */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		netmsg_init(&tcp_reassq[cpu].drain_nmsg, NULL,
		    &netisr_adone_rport, MSGF_PRIORITY, tcp_drain_dispatch);
	}

	syncache_init();
	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu].head)) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu].head, tp, t_outputq);
		tcp_output(tp);
	}
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *)ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	int ipflags = 0;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	struct inpcb *inp = NULL;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		inp = tp->t_inpcb;
		if (!(flags & TH_RST)) {
			win = ssb_space(&inp->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &inp->in6p_route;
			else
				ro = &inp->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = htons(tlen);
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short)(win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(inp,
		    (ro6 && ro6->ro_rt) ? ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL, inp);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		if (inp != NULL && (inp->inp_flags & INP_HASH))
			m_sethash(m, inp->inp_hashval);
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, inp);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
void
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;
	tcp_inittimers(tp);

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;

	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */
	TCP_STATE_INIT(tp);
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;
	tcp_output_init(tp);
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		TCP_STATE_CHANGE(tp, TCPS_CLOSED);
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}

struct netmsg_listen_detach {
	struct netmsg_base	base;
	struct tcpcb		*nm_tp;
	struct tcpcb		*nm_tp_inh;
};

static void
tcp_listen_detach_handler(netmsg_t msg)
{
	struct netmsg_listen_detach *nmsg = (struct netmsg_listen_detach *)msg;
	struct tcpcb *tp = nmsg->nm_tp;
	int cpu = mycpuid, nextcpu;

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, nmsg->nm_tp_inh);
		tcp_pcbport_merge_oncpu(tp);
	}

	in_pcbremwildcardhash_oncpu(tp->t_inpcb, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->base.lmsg);
	else
		lwkt_replymsg(&nmsg->base.lmsg, 0);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance can only happen on the
		 * listen(2) sockets w/ SO_REUSEPORT set.
		 */
		ASSERT_NETISR0;
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function (syncache_destroy and in_pcbdetach).
	 *
	 * NOTE:
	 * After cleaning up the inp's hash and syncache entries, this inp
	 * will no longer be available to the rest of the protocol threads,
	 * so we are safe to whack the inp in the following code.
	 */
	if ((inp->inp_flags & INP_WILDCARD) && netisr_ncpus > 1) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		ASSERT_NETISR0;
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);
	}

	TCP_STATE_TERM(tp);

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_terminate(tp, tp->tt_rexmt);
		tcp_callout_terminate(tp, tp->tt_persist);
		tcp_callout_terminate(tp, tp->tt_keep);
		tcp_callout_terminate(tp, tp->tt_2msl);
		tcp_callout_terminate(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu].head, tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			    sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
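				 *
				 * (Worked example, added for illustration
				 * and assuming the usual RTM_RTTUNIT of
				 * 1000000 usec/sec and TCP_RTT_SCALE of 32:
				 * with hz=100 the factor is 1000000/3200 =
				 * 312, so a t_srtt of 3200, i.e. one
				 * second, maps to i ~= 998400 usec.)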
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		tcp_pcbport_merge_oncpu(tp);
		tcp_pcbport_destroy(tp);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread,
			 * i.e. netisr0.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}
	KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache is not destroyed"));

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * NOTE:
	 * - Remove self from listen tcpcb per-cpu port cache _before_
	 *   pcbdetach.
	 * - pcbdetach removes any wildcard hash entry on the current CPU.
	 */
	tcp_pcbport_remove(inp);
#ifdef INET6
	if (isipv6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}

/*
 * Walk the tcpcbs, if any, and flush the reassembly queue,
 * if there is one.
 */
static void
tcp_drain_oncpu(struct inpcbinfo *pcbinfo)
{
	struct inpcbhead *head = &pcbinfo->pcblisthead;
	struct inpcb *inpb;

	/*
	 * Since we run in netisr, it is MP safe, even if
	 * we block during the inpcb list iteration, i.e.
	 * we don't need to use inpcb marker here.
	 */
	ASSERT_NETISR_NCPUS(pcbinfo->cpu);

	LIST_FOREACH(inpb, head, inp_list) {
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;

		tcpb = intotcpcb(inpb);
		KASSERT(tcpb != NULL, ("tcp_drain_oncpu: tcpb is NULL"));

		if ((te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		}
	}
}

static void
tcp_drain_dispatch(netmsg_t nmsg)
{
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit();

	tcp_drain_oncpu(&tcbinfo[mycpuid]);
	tcp_reassq[mycpuid].draining = 0;
}

static void
tcp_drain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &tcp_reassq[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

void
tcp_drain(void)
{
	cpumask_t mask;
	int cpu;

	if (!do_tcpdrain)
		return;

	if (tcp_reass_qsize == 0)
		return;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		tcp_drain_oncpu(&tcbinfo[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	if (tcp_reass_qsize < netisr_ncpus) {
		/* Not worth the trouble. */
		return;
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (tcp_reassq[cpu].draining) {
			/* Draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		tcp_reassq[cpu].draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, tcp_drain_ipi, NULL);
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < netisr_ncpus; ++ccpu)
			n += tcbinfo[ccpu].ipi_count;
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK | M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 0; ccpu < netisr_ncpus && error == 0; ++ccpu) {
		caddr_t inp_ppcb;
		struct xtcpcb xt;

		lwkt_migratecpu(ccpu);

		n = tcbinfo[ccpu].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_migratecpu(origcpu);
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct ucred cred0, *cred = NULL;
	struct inpcb *inp;
	int cpu, origcpu, error;

	error = caps_priv_check_td(req->td, SYSCAP_RESTRICTEDROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);

	origcpu = mycpuid;
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);

	lwkt_migratecpu(cpu);

	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else if (inp->inp_socket->so_cred != NULL) {
		cred0 = *(inp->inp_socket->so_cred);
		cred = &cred0;
	}

	lwkt_migratecpu(origcpu);

	if (error)
		return (error);

	return SYSCTL_OUT(req, cred, sizeof(struct ucred));
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;

	error = caps_priv_check_td(req->td, SYSCAP_RESTRICTEDROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	inp = in6_pcblookup_hash(&tcbinfo[0],
	    &addrs[1].sin6_addr, addrs[1].sin6_port,
	    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
	    sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg_base base;
	inp_notify_t	nm_notify;
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	ASSERT_NETISR_NCPUS(mycpuid);

	in_pcbnotifyall(&tcbinfo[mycpuid], nm->nm_faddr,
	    nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}

inp_notify_t
tcp_get_inpnotify(int cmd, const struct sockaddr *sa,
    int *arg, struct ip **ip0, int *cpuid)
{
	struct ip *ip = *ip0;
	struct in_addr faddr;
	inp_notify_t notify = tcp_notify;

	faddr = ((const struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return NULL;

	*arg = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH) {
		notify = tcp_quench;
	} else if (icmp_may_rst &&
	    (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	     cmd == PRC_UNREACH_PORT ||
	     cmd == PRC_TIMXCEED_INTRANS) &&
	    ip != NULL) {
		notify = tcp_drop_syn_sent;
	} else if (cmd == PRC_MSGSIZE) {
		const struct icmp *icmp = (const struct icmp *)
		    ((caddr_t)ip - offsetof(struct icmp, icmp_ip));

		*arg = ntohs(icmp->icmp_nextmtu);
		notify = tcp_mtudisc;
	} else if (PRC_IS_REDIRECT(cmd)) {
		ip = NULL;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = NULL;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return NULL;
	}

	if (cpuid != NULL) {
		if (ip == NULL) {
			/* Go through all effective netisr CPUs. */
			*cpuid = netisr_ncpus;
		} else {
			const struct tcphdr *th;

			th = (const struct tcphdr *)
			    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
			*cpuid = tcp_addrcpu(faddr.s_addr, th->th_dport,
			    ip->ip_src.s_addr, th->th_sport);
		}
	}

	*ip0 = ip;
	return notify;
}

void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct in_addr faddr;
	inp_notify_t notify;
	int arg, cpuid;

	ASSERT_NETISR_NCPUS(mycpuid);

	notify = tcp_get_inpnotify(cmd, sa, &arg, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip != NULL) {
		const struct tcphdr *th;
		struct inpcb *inp;

		if (cpuid != mycpuid)
			goto done;

		th = (const struct tcphdr *)
		    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_seq icmpseq = htonl(th->th_seq);
			struct tcpcb *tp = intotcpcb(inp);

			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				notify(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
	} else if (msg->ctlinput.nm_direct) {
		if (cpuid != netisr_ncpus && cpuid != mycpuid)
			goto done;

		in_pcbnotifyall(&tcbinfo[mycpuid], faddr, arg, notify);
	} else {
		struct netmsg_tcp_notify *nm;

		ASSERT_NETISR0;
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
		    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}

#ifdef INET6

void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	inp_notify_t notify = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		/*
		 * The MTU can be passed via an icmp6 packet or directly
		 * via ip6c_cmdarg.
		 */
		struct ip6ctlparam *ip6cp = d;

		if (ip6cp->ip6c_icmp6) {
			struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

			arg = ntohl(icmp6->icmp6_mtu);
		} else if (ip6cp->ip6c_cmdarg) {
			arg = *(uint32_t *)ip6cp->ip6c_cmdarg;
		} else {
			goto out;
		}
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/*
	 * If the parameter is from icmp6, decode it.  Note that in the
	 * mtu shortcut case, the rest of the ip6ctlparam content is
	 * 0 or NULL.
	 */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non-NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0], sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0], sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}

#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between the seeding of isn_secret.  On every reseed we jump the
 * ISN by a lot.
 */
struct tcp_isn {
	u_char	secret[16];
	MD5_CTX	ctx;
	int	last_reseed;
	int	last_offset;
} __cachealign;

struct tcp_isn tcp_isn_ary[MAXCPU];

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	struct tcp_isn *isn;
	tcp_seq new_isn;
	tcp_seq digest[16 / sizeof(tcp_seq)];
	int n;

	isn = &tcp_isn_ary[mycpuid];

	/*
	 * Reseed every 20 seconds.  6 reseeds per 2-minute interval in
	 * order to retain our monotonic offset.
	 *
	 * The initial seed randomizes last_offset with all 32 bits.
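	 *
	 * (Worked arithmetic, added for illustration: each reseed bumps
	 * last_offset by 0x10000000, and each generated ISN advances it
	 * by another 0x00100000..0x001FFFFF below.  Since the md5 digest
	 * contributes at most 0x0FFFFFFF, an ISN generated after a reseed
	 * always exceeds any ISN generated before it, modulo the 32-bit
	 * wrap, which is what preserves the monotonic property.)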
	 *
	 * Note that the md5 digest is masked with 0x0FFFFFFF, so we must
	 * add 1/16 of our full range (1/8 of our signed range) to ensure
	 * monotonic operation.
	 */
	if (isn->last_reseed == 0 ||
	    (u_int)(ticks - isn->last_reseed) > tcp_isn_reseed_interval * hz) {
		if (isn->last_reseed == 0) {
			read_random(&isn->last_offset,
			    sizeof(isn->last_offset), 1);
		}
		read_random(&isn->secret, sizeof(isn->secret), 1);
		isn->last_reseed = ticks;
		isn->last_offset += 0x10000000;
	}

	/*
	 * Compute the md5 hash, giving us a deterministic result for the
	 * port/address pair for any given secret.
	 */
	MD5Init(&isn->ctx);
	MD5Update(&isn->ctx, isn->secret, sizeof(isn->secret));
	MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->inp_fport, 2);
	MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->inp_lport, 2);
#ifdef INET6
	if (INP_ISIPV6(tp->t_inpcb)) {
		MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn->ctx, (u_char *)&tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Final((char *)digest, &isn->ctx);

	/*
	 * Add a random component 0-1048575 plus advance by 1048576.
	 *
	 * The sequence space is simply too small; in modern times we also
	 * must depend on the receive-side being a bit smarter when recycling
	 * ports in TIME_WAIT.
	 */
	read_random(&n, sizeof(n), 1);
	isn->last_offset += (n & 0x000FFFFF) + 0x00100000;
	new_isn = (digest[0] & 0x0FFFFFFF) + isn->last_offset;

	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_quench: tp is NULL"));
	tp->snd_cwnd = tp->t_maxseg;
	tp->snd_wacked = 0;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_drop_syn_sent: tp is NULL"));
	if (tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	KASSERT(tp != NULL, ("tcp_mtudisc: tp is NULL"));

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if (MCLBYTES & (MCLBYTES - 1)) == 0	/* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = rounddown(mss, MCLBYTES);
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->t_rtttime = 0;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}

/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
1883  */
1884 struct rtentry *
1885 tcp_rtlookup(struct in_conninfo *inc)
1886 {
1887	struct route *ro = &inc->inc_route;
1888
1889	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
1890		/* No route yet, so try to acquire one */
1891		if (inc->inc_faddr.s_addr != INADDR_ANY) {
1892			/*
1893			 * unused portions of the structure MUST be zero'd
1894			 * out because rtalloc() treats it as opaque data
1895			 */
1896			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
1897			ro->ro_dst.sa_family = AF_INET;
1898			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1899			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
1900			    inc->inc_faddr;
1901			rtalloc(ro);
1902		}
1903	}
1904	return (ro->ro_rt);
1905 }
1906
1907 #ifdef INET6
1908 struct rtentry *
1909 tcp_rtlookup6(struct in_conninfo *inc)
1910 {
1911	struct route_in6 *ro6 = &inc->inc6_route;
1912
1913	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
1914		/* No route yet, so try to acquire one */
1915		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1916			/*
1917			 * unused portions of the structure MUST be zero'd
1918			 * out because rtalloc() treats it as opaque data
1919			 */
1920			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
1921			ro6->ro_dst.sin6_family = AF_INET6;
1922			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1923			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
1924			rtalloc((struct route *)ro6);
1925		}
1926	}
1927	return (ro6->ro_rt);
1928 }
1929 #endif
1930
1931 /*
1932  * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1933  *
1934  * This code attempts to calculate the bandwidth-delay product as a
1935  * means of determining the optimal window size to maximize bandwidth,
1936  * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1937  * routers.  This code also does a fairly good job keeping RTTs in check
1938  * across slow links like modems.  We implement an algorithm which is very
1939  * similar to (but not meant to be) TCP/Vegas.  The code operates on the
1940  * transmitter side of a TCP connection and so only affects the transmit
1941  * side of the connection.
1942  *
1943  * BACKGROUND: TCP makes no provision for the management of buffer space
1944  * at the end points or at the intermediate routers and switches.  A TCP
1945  * stream, whether using NewReno or not, will eventually buffer as
1946  * many packets as it is able and the only reason this typically works is
1947  * due to the fairly small default buffers made available for a connection
1948  * (typically 16K or 32K).  As machines use larger windows and/or window
1949  * scaling it is now fairly easy for even a single TCP connection to blow-out
1950  * all available buffer space not only on the local interface, but on
1951  * intermediate routers and switches as well.  NewReno makes a misguided
1952  * attempt to 'solve' this problem by waiting for an actual failure to occur,
1953  * then backing off, then steadily increasing the window again until another
1954  * failure occurs, ad-infinitum.  This results in terrible oscillation that
1955  * is only made worse as network loads increase and the idea of intentionally
1956  * blowing out network buffers is, frankly, a terrible way to manage network
1957  * resources.
1958  *
1959  * It is far better to limit the transmit window prior to the failure
1960  * condition being achieved.
There are two general ways to do this: First 1961 * you can 'scan' through different transmit window sizes and locate the 1962 * point where the RTT stops increasing, indicating that you have filled the 1963 * pipe, then scan backwards until you note that RTT stops decreasing, then 1964 * repeat ad-infinitum. This method works in principle but has severe 1965 * implementation issues due to RTT variances, timer granularity, and 1966 * instability in the algorithm which can lead to many false positives and 1967 * create oscillations as well as interact badly with other TCP streams 1968 * implementing the same algorithm. 1969 * 1970 * The second method is to limit the window to the bandwidth delay product 1971 * of the link. This is the method we implement. RTT variances and our 1972 * own manipulation of the congestion window, bwnd, can potentially 1973 * destabilize the algorithm. For this reason we have to stabilize the 1974 * elements used to calculate the window. We do this by using the minimum 1975 * observed RTT, the long term average of the observed bandwidth, and 1976 * by adding two segments worth of slop. It isn't perfect but it is able 1977 * to react to changing conditions and gives us a very stable basis on 1978 * which to extend the algorithm. 1979 */ 1980 void 1981 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq) 1982 { 1983 u_long bw; 1984 u_long ibw; 1985 u_long bwnd; 1986 int save_ticks; 1987 int delta_ticks; 1988 1989 /* 1990 * If inflight_enable is disabled in the middle of a tcp connection, 1991 * make sure snd_bwnd is effectively disabled. 1992 */ 1993 if (!tcp_inflight_enable) { 1994 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 1995 tp->snd_bandwidth = 0; 1996 return; 1997 } 1998 1999 /* 2000 * Validate the delta time. If a connection is new or has been idle 2001 * a long time we have to reset the bandwidth calculator. 2002 */ 2003 save_ticks = ticks; 2004 cpu_ccfence(); 2005 delta_ticks = save_ticks - tp->t_bw_rtttime; 2006 if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) { 2007 tp->t_bw_rtttime = save_ticks; 2008 tp->t_bw_rtseq = ack_seq; 2009 if (tp->snd_bandwidth == 0) 2010 tp->snd_bandwidth = tcp_inflight_start; 2011 return; 2012 } 2013 2014 /* 2015 * A delta of at least 1 tick is required. Waiting 2 ticks will 2016 * result in better (bw) accuracy. More than that and the ramp-up 2017 * will be too slow. 2018 */ 2019 if (delta_ticks == 0 || delta_ticks == 1) 2020 return; 2021 2022 /* 2023 * Sanity check, plus ignore pure window update acks. 2024 */ 2025 if ((int)(ack_seq - tp->t_bw_rtseq) <= 0) 2026 return; 2027 2028 /* 2029 * Figure out the bandwidth. Due to the tick granularity this 2030 * is a very rough number and it MUST be averaged over a fairly 2031 * long period of time. XXX we need to take into account a link 2032 * that is not using all available bandwidth, but for now our 2033 * slop will ramp us up if this case occurs and the bandwidth later 2034 * increases. 2035 */ 2036 ibw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks; 2037 tp->t_bw_rtttime = save_ticks; 2038 tp->t_bw_rtseq = ack_seq; 2039 bw = ((int64_t)tp->snd_bandwidth * 15 + ibw) >> 4; 2040 2041 tp->snd_bandwidth = bw; 2042 2043 /* 2044 * Calculate the semi-static bandwidth delay product, plus two maximal 2045 * segments. The additional slop puts us squarely in the sweet 2046 * spot and also handles the bandwidth run-up case. Without the 2047 * slop we could be locking ourselves into a lower bandwidth. 
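	 *
	 * A worked example with made-up numbers: if the averaged bw
	 * is 1,250,000 bytes/sec and USERTT works out to 40ms, the
	 * formula below yields roughly 1250000 * 0.040 + 2 * 1448 =
	 * 52896 bytes (assuming a 1448 byte t_maxseg), i.e. the pipe
	 * size plus two segments of slop.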
2048	 *
2049	 * At very high speeds the bw calculation can become overly sensitive
2050	 * and error prone when delta_ticks is low (e.g. usually 1).  To deal
2051	 * with the problem the stab must be scaled to the bw.  A stab of 50
2052	 * (the default) increases the bw for the purposes of the bwnd
2053	 * calculation by 5%.
2054	 *
2055	 * Situations Handled:
2056	 *	(1) Prevents over-queueing of packets on LANs, especially on
2057	 *	    high speed LANs, allowing larger TCP buffers to be
2058	 *	    specified, and also does a good job preventing
2059	 *	    over-queueing of packets over choke points like modems
2060	 *	    (at least for the transmit side).
2061	 *
2062	 *	(2) Is able to handle changing network loads (bandwidth
2063	 *	    drops so bwnd drops, bandwidth increases so bwnd
2064	 *	    increases).
2065	 *
2066	 *	(3) Theoretically should stabilize in the face of multiple
2067	 *	    connections implementing the same algorithm (this may need
2068	 *	    a little work).
2069	 *
2070	 *	(4) The stability value (tcp_inflight_stab, defaulting to 50
2071	 *	    as noted above) can be adjusted with a sysctl, but typically
2072	 *	    only needs to be changed on very slow connections.  A value
2073	 *	    no smaller than 5 should be used, and only reduce this
2074	 *	    default if you have no other choice.
2075	 */
2076
2077 #define USERTT	((tp->t_srtt + tp->t_rttvar) + tcp_inflight_adjrtt)
2078	bw += bw * tcp_inflight_stab / 1000;
2079	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
2080	       (int)tp->t_maxseg * 2;
2081 #undef USERTT
2082
2083	if (tcp_inflight_debug > 0) {
2084		static int ltime;
2085		if ((u_int)(save_ticks - ltime) >= hz / tcp_inflight_debug) {
2086			ltime = save_ticks;
2087			kprintf("%p ibw %ld bw %ld rttvar %d srtt %d "
2088			    "bwnd %ld delta %d snd_win %ld\n",
2089			    tp, ibw, bw, tp->t_rttvar, tp->t_srtt,
2090			    bwnd, delta_ticks, tp->snd_wnd);
2091		}
2092	}
2093	if ((long)bwnd < tcp_inflight_min)
2094		bwnd = tcp_inflight_min;
2095	if (bwnd > tcp_inflight_max)
2096		bwnd = tcp_inflight_max;
2097	if ((long)bwnd < tp->t_maxseg * 2)
2098		bwnd = tp->t_maxseg * 2;
2099	tp->snd_bwnd = bwnd;
2100 }
2101
2102 static void
2103 tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
2104 {
2105	struct rtentry *rt;
2106	struct inpcb *inp = tp->t_inpcb;
2107 #ifdef INET6
2108	boolean_t isipv6 = INP_ISIPV6(inp);
2109 #else
2110	const boolean_t isipv6 = FALSE;
2111 #endif
2112
2113	/* XXX */
2114	if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
2115		tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
2116	if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
2117		tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
2118
2119	if (isipv6)
2120		rt = tcp_rtlookup6(&inp->inp_inc);
2121	else
2122		rt = tcp_rtlookup(&inp->inp_inc);
2123	if (rt == NULL ||
2124	    rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
2125	    rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
2126		*maxsegs = tcp_iw_maxsegs;
2127		*capsegs = tcp_iw_capsegs;
2128		return;
2129	}
2130	*maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
2131	*capsegs = rt->rt_rmx.rmx_iwcapsegs;
2132 }
2133
2134 u_long
2135 tcp_initial_window(struct tcpcb *tp)
2136 {
2137	if (tcp_do_rfc3390) {
2138		/*
2139		 * RFC3390:
2140		 * "If the SYN or SYN/ACK is lost, the initial window
2141		 *  used by a sender after a correctly transmitted SYN
2142		 *  MUST be one segment consisting of MSS bytes."
2143		 *
2144		 * However, we do something a little bit more aggressive
2145		 * than RFC3390 here:
2146		 * - The IW is reduced only if the time spent in SYN or
2147		 *   SYN|ACK retransmission is >= 3 seconds.  We do this mainly
2148		 *   because when RFC3390 was published, the initial RTO was
2149		 *   still 3 seconds (the threshold we test here), while
2150		 *   after RFC6298, the initial RTO is 1 second.  This
2151		 *   behaviour probably still falls within the spirit of
2152		 *   RFC3390.
2153		 * - When IW is reduced, 2*MSS is used instead of 1*MSS.
2154		 *   Mainly to avoid sender and receiver deadlock until
2155		 *   delayed ACK timer expires.  And even RFC2581 does not
2156		 *   try to reduce IW upon SYN or SYN|ACK retransmission
2157		 *   timeout.
2158		 *
2159		 * See also:
2160		 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
2161		 */
2162		if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
2163			return (2 * tp->t_maxseg);
2164		} else {
2165			u_long maxsegs, capsegs;
2166
2167			tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
2168			return min(maxsegs * tp->t_maxseg,
2169			    max(2 * tp->t_maxseg, capsegs * 1460));
2170		}
2171	} else {
2172		/*
2173		 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
2174		 *
2175		 * Mainly to avoid sender and receiver deadlock
2176		 * until delayed ACK timer expires.
2177		 */
2178		return (2 * tp->t_maxseg);
2179	}
2180 }
2181
2182 #ifdef TCP_SIGNATURE
2183 /*
2184  * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
2185  *
2186  * We do this over ip, tcphdr, segment data, and the key in the SADB.
2187  * When called from tcp_input(), we can be sure that th_sum has been
2188  * zeroed out and verified already.
2189  *
2190  * Return 0 if successful, otherwise return EINVAL.
2191  *
2192  * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
2193  * search with the destination IP address, and a 'magic SPI' to be
2194  * determined by the application.  This is hardcoded elsewhere to 1179
2195  * right now.  Another branch of this code exists which uses the SPD to
2196  * specify per-application flows but it is unstable.
2197  */
2198 int
2199 tcpsignature_compute(
2200	struct mbuf *m,		/* mbuf chain */
2201	int len,		/* length of TCP data */
2202	int optlen,		/* length of TCP options */
2203	u_char *buf,		/* storage for MD5 digest */
2204	u_int direction)	/* direction of flow */
2205 {
2206	struct ippseudo ippseudo;
2207	MD5_CTX ctx;
2208	int doff;
2209	struct ip *ip;
2210	struct ipovly *ipovly;
2211	struct secasvar *sav;
2212	struct tcphdr *th;
2213 #ifdef INET6
2214	struct ip6_hdr *ip6;
2215	struct in6_addr in6;
2216	uint32_t plen;
2217	uint16_t nhdr;
2218 #endif /* INET6 */
2219	u_short savecsum;
2220
2221	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
2222	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));
2223	/*
2224	 * Extract the destination from the IP header in the mbuf.
2225	 */
2226	ip = mtod(m, struct ip *);
2227 #ifdef INET6
2228	ip6 = NULL;	/* Make the compiler happy. */
2229 #endif /* INET6 */
2230	/*
2231	 * Look up an SADB entry which matches the address found in
2232	 * the segment.
2233	 */
2234	switch (IP_VHL_V(ip->ip_vhl)) {
2235	case IPVERSION:
2236		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
2237		    IPPROTO_TCP, htonl(TCP_SIG_SPI));
2238		break;
2239 #ifdef INET6
2240	case (IPV6_VERSION >> 4):
2241		ip6 = mtod(m, struct ip6_hdr *);
2242		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
2243		    IPPROTO_TCP, htonl(TCP_SIG_SPI));
2244		break;
2245 #endif /* INET6 */
2246	default:
2247		return (EINVAL);
2248		/* NOTREACHED */
2249		break;
2250	}
2251	if (sav == NULL) {
2252		kprintf("%s: SADB lookup failed\n", __func__);
2253		return (EINVAL);
2254	}
2255	MD5Init(&ctx);
2256
2257	/*
2258	 * Step 1: Update MD5 hash with IP pseudo-header.
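	 *
	 * For reference, the IPv4 pseudo-header digested below is
	 * 12 bytes in network byte order (a restatement of the code
	 * that follows): the 4-byte source and destination addresses,
	 * a zero pad byte, the protocol byte (IPPROTO_TCP), and a
	 * 2-byte length covering the TCP header, options and payload.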
2259 * 2260 * XXX The ippseudo header MUST be digested in network byte order, 2261 * or else we'll fail the regression test. Assume all fields we've 2262 * been doing arithmetic on have been in host byte order. 2263 * XXX One cannot depend on ipovly->ih_len here. When called from 2264 * tcp_output(), the underlying ip_len member has not yet been set. 2265 */ 2266 switch (IP_VHL_V(ip->ip_vhl)) { 2267 case IPVERSION: 2268 ipovly = (struct ipovly *)ip; 2269 ippseudo.ippseudo_src = ipovly->ih_src; 2270 ippseudo.ippseudo_dst = ipovly->ih_dst; 2271 ippseudo.ippseudo_pad = 0; 2272 ippseudo.ippseudo_p = IPPROTO_TCP; 2273 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen); 2274 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo)); 2275 th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip)); 2276 doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen; 2277 break; 2278 #ifdef INET6 2279 /* 2280 * RFC 2385, 2.0 Proposal 2281 * For IPv6, the pseudo-header is as described in RFC 2460, namely the 2282 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero- 2283 * extended next header value (to form 32 bits), and 32-bit segment 2284 * length. 2285 * Note: Upper-Layer Packet Length comes before Next Header. 2286 */ 2287 case (IPV6_VERSION >> 4): 2288 in6 = ip6->ip6_src; 2289 in6_clearscope(&in6); 2290 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr)); 2291 in6 = ip6->ip6_dst; 2292 in6_clearscope(&in6); 2293 MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr)); 2294 plen = htonl(len + sizeof(struct tcphdr) + optlen); 2295 MD5Update(&ctx, (char *)&plen, sizeof(uint32_t)); 2296 nhdr = 0; 2297 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); 2298 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); 2299 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); 2300 nhdr = IPPROTO_TCP; 2301 MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t)); 2302 th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr)); 2303 doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen; 2304 break; 2305 #endif /* INET6 */ 2306 default: 2307 return (EINVAL); 2308 /* NOTREACHED */ 2309 break; 2310 } 2311 /* 2312 * Step 2: Update MD5 hash with TCP header, excluding options. 2313 * The TCP checksum must be set to zero. 2314 */ 2315 savecsum = th->th_sum; 2316 th->th_sum = 0; 2317 MD5Update(&ctx, (char *)th, sizeof(struct tcphdr)); 2318 th->th_sum = savecsum; 2319 /* 2320 * Step 3: Update MD5 hash with TCP segment data. 2321 * Use m_apply() to avoid an early m_pullup(). 2322 */ 2323 if (len > 0) 2324 m_apply(m, doff, len, tcpsignature_apply, &ctx); 2325 /* 2326 * Step 4: Update MD5 hash with shared secret. 2327 */ 2328 MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); 2329 MD5Final(buf, &ctx); 2330 key_sa_recordxfer(sav, m); 2331 key_freesav(sav); 2332 return (0); 2333 } 2334 2335 int 2336 tcpsignature_apply(void *fstate, void *data, unsigned int len) 2337 { 2338 2339 MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len); 2340 return (0); 2341 } 2342 #endif /* TCP_SIGNATURE */ 2343 2344 static void 2345 tcp_drop_sysctl_dispatch(netmsg_t nmsg) 2346 { 2347 struct lwkt_msg *lmsg = &nmsg->lmsg; 2348 /* addrs[0] is a foreign socket, addrs[1] is a local one. 
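	 * A hypothetical userland sketch of how these are supplied
	 * (sysctl_tcp_drop() below copies in exactly this layout):
	 *
	 *	struct sockaddr_storage addrs[2];
	 *	... fill addrs[0] with the peer, addrs[1] with the
	 *	    local address, then:
	 *	sysctlbyname("net.inet.tcp.drop", NULL, NULL,
	 *	    addrs, sizeof(addrs));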
*/ 2349 struct sockaddr_storage *addrs = lmsg->u.ms_resultp; 2350 int error; 2351 struct sockaddr_in *fin, *lin; 2352 #ifdef INET6 2353 struct sockaddr_in6 *fin6, *lin6; 2354 struct in6_addr f6, l6; 2355 #endif 2356 struct inpcb *inp; 2357 2358 switch (addrs[0].ss_family) { 2359 #ifdef INET6 2360 case AF_INET6: 2361 fin6 = (struct sockaddr_in6 *)&addrs[0]; 2362 lin6 = (struct sockaddr_in6 *)&addrs[1]; 2363 error = in6_embedscope(&f6, fin6, NULL, NULL); 2364 if (error) 2365 goto done; 2366 error = in6_embedscope(&l6, lin6, NULL, NULL); 2367 if (error) 2368 goto done; 2369 inp = in6_pcblookup_hash(&tcbinfo[mycpuid], &f6, 2370 fin6->sin6_port, &l6, lin6->sin6_port, FALSE, NULL); 2371 break; 2372 #endif 2373 #ifdef INET 2374 case AF_INET: 2375 fin = (struct sockaddr_in *)&addrs[0]; 2376 lin = (struct sockaddr_in *)&addrs[1]; 2377 inp = in_pcblookup_hash(&tcbinfo[mycpuid], fin->sin_addr, 2378 fin->sin_port, lin->sin_addr, lin->sin_port, FALSE, NULL); 2379 break; 2380 #endif 2381 default: 2382 /* 2383 * Must not reach here, since the address family was 2384 * checked in sysctl handler. 2385 */ 2386 panic("unknown address family %d", addrs[0].ss_family); 2387 } 2388 if (inp != NULL) { 2389 struct tcpcb *tp = intotcpcb(inp); 2390 2391 KASSERT((inp->inp_flags & INP_WILDCARD) == 0, 2392 ("in wildcard hash")); 2393 KASSERT(tp != NULL, ("tcp_drop_sysctl_dispatch: tp is NULL")); 2394 KASSERT((tp->t_flags & TF_LISTEN) == 0, ("listen socket")); 2395 tcp_drop(tp, ECONNABORTED); 2396 error = 0; 2397 } else { 2398 error = ESRCH; 2399 } 2400 #ifdef INET6 2401 done: 2402 #endif 2403 lwkt_replymsg(lmsg, error); 2404 } 2405 2406 static int 2407 sysctl_tcp_drop(SYSCTL_HANDLER_ARGS) 2408 { 2409 /* addrs[0] is a foreign socket, addrs[1] is a local one. */ 2410 struct sockaddr_storage addrs[2]; 2411 struct sockaddr_in *fin, *lin; 2412 #ifdef INET6 2413 struct sockaddr_in6 *fin6, *lin6; 2414 #endif 2415 struct netmsg_base nmsg; 2416 struct lwkt_msg *lmsg = &nmsg.lmsg; 2417 struct lwkt_port *port = NULL; 2418 int error; 2419 2420 fin = lin = NULL; 2421 #ifdef INET6 2422 fin6 = lin6 = NULL; 2423 #endif 2424 error = 0; 2425 2426 if (req->oldptr != NULL || req->oldlen != 0) 2427 return (EINVAL); 2428 if (req->newptr == NULL) 2429 return (EPERM); 2430 if (req->newlen < sizeof(addrs)) 2431 return (ENOMEM); 2432 error = SYSCTL_IN(req, &addrs, sizeof(addrs)); 2433 if (error) 2434 return (error); 2435 2436 switch (addrs[0].ss_family) { 2437 #ifdef INET6 2438 case AF_INET6: 2439 fin6 = (struct sockaddr_in6 *)&addrs[0]; 2440 lin6 = (struct sockaddr_in6 *)&addrs[1]; 2441 if (fin6->sin6_len != sizeof(struct sockaddr_in6) || 2442 lin6->sin6_len != sizeof(struct sockaddr_in6)) 2443 return (EINVAL); 2444 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr) || 2445 IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) 2446 return (EADDRNOTAVAIL); 2447 #if 0 2448 error = sa6_embedscope(fin6, V_ip6_use_defzone); 2449 if (error) 2450 return (error); 2451 error = sa6_embedscope(lin6, V_ip6_use_defzone); 2452 if (error) 2453 return (error); 2454 #endif 2455 port = tcp6_addrport(); 2456 break; 2457 #endif 2458 #ifdef INET 2459 case AF_INET: 2460 fin = (struct sockaddr_in *)&addrs[0]; 2461 lin = (struct sockaddr_in *)&addrs[1]; 2462 if (fin->sin_len != sizeof(struct sockaddr_in) || 2463 lin->sin_len != sizeof(struct sockaddr_in)) 2464 return (EINVAL); 2465 port = tcp_addrport(fin->sin_addr.s_addr, fin->sin_port, 2466 lin->sin_addr.s_addr, lin->sin_port); 2467 break; 2468 #endif 2469 default: 2470 return (EINVAL); 2471 } 2472 2473 netmsg_init(&nmsg, NULL, 
&curthread->td_msgport, 0, 2474 tcp_drop_sysctl_dispatch); 2475 lmsg->u.ms_resultp = addrs; 2476 return lwkt_domsg(port, lmsg, 0); 2477 } 2478 2479 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, drop, 2480 CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL, 2481 0, sysctl_tcp_drop, "", "Drop TCP connection"); 2482 2483 static int 2484 sysctl_tcps_count(SYSCTL_HANDLER_ARGS) 2485 { 2486 u_long state_count[TCP_NSTATES]; 2487 int cpu; 2488 2489 memset(state_count, 0, sizeof(state_count)); 2490 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 2491 int i; 2492 2493 for (i = 0; i < TCP_NSTATES; ++i) 2494 state_count[i] += tcpstate_count[cpu].tcps_count[i]; 2495 } 2496 2497 return sysctl_handle_opaque(oidp, state_count, sizeof(state_count), req); 2498 } 2499 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, state_count, 2500 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, 2501 sysctl_tcps_count, "LU", "TCP connection counts by state"); 2502 2503 void 2504 tcp_pcbport_create(struct tcpcb *tp) 2505 { 2506 int cpu; 2507 2508 KASSERT((tp->t_flags & TF_LISTEN) && tp->t_state == TCPS_LISTEN, 2509 ("not a listen tcpcb")); 2510 2511 KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache was created")); 2512 tp->t_pcbport = 2513 kmalloc(sizeof(struct tcp_pcbport) * netisr_ncpus, 2514 M_PCB, 2515 M_WAITOK | M_CACHEALIGN); 2516 2517 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 2518 struct inpcbport *phd; 2519 2520 phd = &tp->t_pcbport[cpu].t_phd; 2521 LIST_INIT(&phd->phd_pcblist); 2522 /* Though, not used ... */ 2523 phd->phd_port = tp->t_inpcb->inp_lport; 2524 } 2525 } 2526 2527 void 2528 tcp_pcbport_merge_oncpu(struct tcpcb *tp) 2529 { 2530 struct inpcbport *phd; 2531 struct inpcb *inp; 2532 int cpu = mycpuid; 2533 2534 KASSERT(cpu < netisr_ncpus, ("invalid cpu%d", cpu)); 2535 phd = &tp->t_pcbport[cpu].t_phd; 2536 2537 while ((inp = LIST_FIRST(&phd->phd_pcblist)) != NULL) { 2538 KASSERT(inp->inp_phd == phd && inp->inp_porthash == NULL, 2539 ("not on tcpcb port cache")); 2540 LIST_REMOVE(inp, inp_portlist); 2541 in_pcbinsporthash_lport(inp); 2542 KASSERT(inp->inp_phd == tp->t_inpcb->inp_phd && 2543 inp->inp_porthash == tp->t_inpcb->inp_porthash, 2544 ("tcpcb port cache merge failed")); 2545 } 2546 } 2547 2548 void 2549 tcp_pcbport_destroy(struct tcpcb *tp) 2550 { 2551 #ifdef INVARIANTS 2552 int cpu; 2553 2554 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 2555 KASSERT(LIST_EMPTY(&tp->t_pcbport[cpu].t_phd.phd_pcblist), 2556 ("tcpcb port cache is not empty")); 2557 } 2558 #endif 2559 kfree(tp->t_pcbport, M_PCB); 2560 tp->t_pcbport = NULL; 2561 } 2562
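/*
 * Lifecycle sketch for the per-cpu listen port cache above, inferred
 * from the assertions in the three functions (an illustrative summary):
 * tcp_pcbport_create() runs once a tcpcb enters TCPS_LISTEN,
 * tcp_pcbport_merge_oncpu() migrates cached inpcbs into the global
 * port hash on each netisr cpu, and tcp_pcbport_destroy() frees the
 * (by then empty) cache when the listen tcpcb is torn down.
 */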