/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *   This product includes software developed by Jeffrey M. Hsu.
 *
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>

#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netproto/key/key.h>
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#include <netproto/ipsec/key.h>
#define	IPSEC
#endif /* FAST_IPSEC */
static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	syncache_drop(struct syncache *, struct syncache_head *);
static void	syncache_free(struct syncache *);
static void	syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *);
static void	syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 4 retransmits correspond to a timeout of (3 + 3 + 3 + 3 + 3 == 15) seconds,
 * or (1 + 1 + 2 + 4 + 8 == 16) seconds if RFC 6298 is used; the odds are
 * that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		4

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct netmsg_sc_timer {
	struct netmsg_base base;
	struct msgrec *nm_mrec;	/* back pointer to containing msgrec */
};

struct msgrec {
	struct netmsg_sc_timer msg;
	lwkt_port_t port;	/* constant after init */
	int slot;		/* constant after init */
};

static void syncache_timer_handler(netmsg_t);

struct tcp_syncache {
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
};
static struct tcp_syncache tcp_syncache;

TAILQ_HEAD(syncache_list, syncache);

struct tcp_syncache_percpu {
	struct syncache_head	*hashbase;
	u_int			cache_count;
	struct syncache_list	timerq[SYNCACHE_MAXREXMTS + 1];
	struct callout		tt_timerq[SYNCACHE_MAXREXMTS + 1];
	struct msgrec		mrec[SYNCACHE_MAXREXMTS + 1];
} __cachealign;
static struct tcp_syncache_percpu tcp_syncache_percpu[MAXCPU];

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

/* XXX JH */
#if 0
SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");
#endif

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
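/*
 * Bucket selection example (illustrative only, not part of the build):
 * with the default hash size of 512 the mask is 0x1ff, so an IPv4
 * connection lands in
 *
 *	idx = (tcp_syncache.hash_secret ^ faddr ^ (faddr >> 16) ^
 *	       fport ^ lport) & 0x1ff;
 *	sch = &syncache_percpu->hashbase[idx];
 *
 * Because hash_secret is randomized at boot (see syncache_init() below),
 * a remote attacker cannot predict bucket placement and deliberately
 * overflow a single bucket.
 */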
static __inline int
syncache_rto(int slot)
{
	if (tcp_low_rtobase)
		return (TCPTV_RTOBASE * tcp_syn_backoff_low[slot]);
	else
		return (TCPTV_RTOBASE * tcp_syn_backoff[slot]);
}

static __inline void
syncache_timeout(struct tcp_syncache_percpu *syncache_percpu,
    struct syncache *sc, int slot)
{
	int rto;

	if (slot > 0) {
		/*
		 * Record the time that we spent in SYN|ACK
		 * retransmission.
		 *
		 * Needed by RFC 3390 and RFC 6298.
		 */
		sc->sc_rxtused += syncache_rto(slot - 1);
	}
	sc->sc_rxtslot = slot;

	rto = syncache_rto(slot);
	sc->sc_rxttime = ticks + rto;

	TAILQ_INSERT_TAIL(&syncache_percpu->timerq[slot], sc, sc_timerq);
	if (!callout_active(&syncache_percpu->tt_timerq[slot])) {
		callout_reset(&syncache_percpu->tt_timerq[slot], rto,
		    syncache_timer, &syncache_percpu->mrec[slot]);
	}
}
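/*
 * Retransmit schedule sketch (assuming TCPTV_RTOBASE is one second and
 * backoff tables beginning { 3, 3, 3, 3, 3 } (normal) and
 * { 1, 1, 2, 4, 8 } (tcp_low_rtobase), per the SYNCACHE_MAXREXMTS
 * comment above):
 *
 *	slot:        0    1    2    3    4
 *	normal rto:  3s   3s   3s   3s   3s   (~15s total)
 *	low rto:     1s   1s   2s   4s   8s   (~16s total)
 *
 * sc_rxtused accumulates the time (in ticks) already spent retransmitting
 * so the tcpcb built later can seed its RTT estimate (RFC 3390/6298).
 */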
static void
syncache_free(struct syncache *sc)
{
	struct rtentry *rt;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (sc->sc_ipopts)
		m_free(sc->sc_ipopts);

	rt = isipv6 ? sc->sc_route6.ro_rt : sc->sc_route.ro_rt;
	if (rt != NULL) {
		/*
		 * If this is the only reference to a protocol-cloned
		 * route, remove it immediately.
		 */
		if ((rt->rt_flags & (RTF_WASCLONED | RTF_LLINFO)) ==
		    RTF_WASCLONED && rt->rt_refcnt == 1) {
			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, NULL);
		}
		RTFREE(rt);
	}
	kfree(sc, M_SYNCACHE);
}

void
syncache_init(void)
{
	int i, cpu;

	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = karc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		kprintf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	for (cpu = 0; cpu < netisr_ncpus; cpu++) {
		struct tcp_syncache_percpu *syncache_percpu;

		syncache_percpu = &tcp_syncache_percpu[cpu];
		/* Allocate the hash table. */
		syncache_percpu->hashbase = kmalloc_cachealign(
		    tcp_syncache.hashsize * sizeof(struct syncache_head),
		    M_SYNCACHE, M_WAITOK);

		/* Initialize the hash buckets. */
		for (i = 0; i < tcp_syncache.hashsize; i++) {
			struct syncache_head *bucket;

			bucket = &syncache_percpu->hashbase[i];
			TAILQ_INIT(&bucket->sch_bucket);
			bucket->sch_length = 0;
		}

		for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
			/* Initialize the timer queues. */
			TAILQ_INIT(&syncache_percpu->timerq[i]);
			callout_init_mp(&syncache_percpu->tt_timerq[i]);

			syncache_percpu->mrec[i].slot = i;
			syncache_percpu->mrec[i].port = netisr_cpuport(cpu);
			syncache_percpu->mrec[i].msg.nm_mrec =
			    &syncache_percpu->mrec[i];
			netmsg_init(&syncache_percpu->mrec[i].msg.base,
			    NULL, &netisr_adone_rport,
			    MSGF_PRIORITY, syncache_timer_handler);
		}
	}
}
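/*
 * Sizing note: each netisr cpu gets its own table, so with the stock
 * values above the per-cpu capacity is 512 buckets * 30 entries per
 * bucket == 15360 entries.  The loader tunables
 * net.inet.tcp.syncache.{hashsize,bucketlimit,cachelimit} (e.g. set
 * from /boot/loader.conf) override the defaults, with the caveat that
 * a non-power-of-2 hashsize is forced back to 512.
 */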
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc2;
	int i;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (syncache_percpu->cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&syncache_percpu->timerq[i]);
			while (sc2 && (sc2->sc_flags & SCF_MARKER))
				sc2 = TAILQ_NEXT(sc2, sc_timerq);
			if (sc2 != NULL)
				break;
		}
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	syncache_timeout(syncache_percpu, sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	syncache_percpu->cache_count++;
	tcpstat.tcps_sc_added++;
}
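/*
 * Walk every bucket and reparent entries owned by the outgoing tcpcb
 * to the inheriting tcpcb (e.g. when a listen socket goes away and a
 * surviving listener takes over its duties).  Entries are not dropped
 * here; this merely keeps sc_tp from dangling.
 */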
void
syncache_destroy(struct tcpcb *tp, struct tcpcb *tp_inh)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache_head *bucket;
	struct syncache *sc;
	int i;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
	sc = NULL;

	for (i = 0; i < tcp_syncache.hashsize; i++) {
		bucket = &syncache_percpu->hashbase[i];
		TAILQ_FOREACH(sc, &bucket->sch_bucket, sc_hash) {
			if (sc->sc_tp == tp)
				sc->sc_tp = tp_inh;
		}
	}
}

static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	if (sch == NULL) {
		if (isipv6) {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	syncache_percpu->cache_count--;

	/*
	 * Cleanup
	 */
	sc->sc_tp = NULL;

	/*
	 * Remove the entry from the syncache timer/timeout queue.  Note
	 * that we do not try to stop any running timer since we do not know
	 * whether the timer's message is in-transit or not.  Since timeouts
	 * are fairly long, taking an unneeded callout does not detrimentally
	 * affect performance.
	 */
	TAILQ_REMOVE(&syncache_percpu->timerq[sc->sc_rxtslot], sc, sc_timerq);

	syncache_free(sc);
}

/*
 * Place a timeout message on the TCP thread's message queue.
 * This routine runs in soft interrupt context.
 *
 * An invariant: for this routine to be called, the callout must
 * have been active.  Note that the callout is not deactivated until
 * after the message has been processed in syncache_timer_handler() below.
 */
static void
syncache_timer(void *p)
{
	struct netmsg_sc_timer *msg = p;

	lwkt_sendmsg_oncpu(msg->nm_mrec->port, &msg->base.lmsg);
}
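/*
 * Timer message flow, per the comments above and below:
 *
 *	callout softint                     TCP protocol thread
 *	---------------                     -------------------
 *	syncache_timer()  --- lwkt msg -->  syncache_timer_handler()
 *
 * The nm_mrec back pointer recovers the slot and destination port from
 * the preallocated msgrec, and the handler replies to the message when
 * it is done, making it reusable by the next callout expiration.
 */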
/*
 * Service a timer message queued by timer expiration.
 * This routine runs in the TCP protocol thread.
 *
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 *
 * When we finish processing timed-out entries, we restart the timer if there
 * are any entries still on the queue and deactivate it otherwise.  Only after
 * a timer has been deactivated here can it be restarted by syncache_timeout().
 */
static void
syncache_timer_handler(netmsg_t msg)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc;
	struct syncache marker;
	struct syncache_list *list;
	struct inpcb *inp;
	int slot;

	slot = ((struct netmsg_sc_timer *)msg)->nm_mrec->slot;
	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	list = &syncache_percpu->timerq[slot];

	/*
	 * Use a marker to keep our place in the scan.  syncache_drop()
	 * can block and cause any next pointer we cache to become stale.
	 */
	marker.sc_flags = SCF_MARKER;
	TAILQ_INSERT_HEAD(list, &marker, sc_timerq);

	while ((sc = TAILQ_NEXT(&marker, sc_timerq)) != NULL) {
		/*
		 * Move the marker.
		 */
		TAILQ_REMOVE(list, &marker, sc_timerq);
		TAILQ_INSERT_AFTER(list, sc, &marker, sc_timerq);

		if (sc->sc_flags & SCF_MARKER)
			continue;

		if (ticks < sc->sc_rxttime)
			break;	/* finished because timerq sorted by time */
		if (sc->sc_tp == NULL) {
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL ||
		    inp->inp_gencnt != sc->sc_inp_gencnt) {
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
		syncache_respond(sc, NULL);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(list, sc, sc_timerq);
		syncache_timeout(syncache_percpu, sc, slot + 1);
	}
	TAILQ_REMOVE(list, &marker, sc_timerq);

	if (sc != NULL) {
		callout_reset(&syncache_percpu->tt_timerq[slot],
		    sc->sc_rxttime - ticks, syncache_timer,
		    &syncache_percpu->mrec[slot]);
	} else {
		callout_deactivate(&syncache_percpu->tt_timerq[slot]);
	}
	lwkt_replymsg(&msg->base.lmsg, 0);
}
/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc;
	struct syncache_head *sch;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
	} else
#endif
	{
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}
/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		return;
	}
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}
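/*
 * Worked example for the window check above: with sc_irs == 1000 and
 * sc_wnd == 57344, a RST whose sequence number lies in [1000, 58344]
 * tears the entry down; anything outside that range is ignored as a
 * likely blind-reset attempt.
 */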
void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(struct in_conninfo *inc, const struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 *
 * This is called from the context of the SYN+ACK.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL, *linp;
	struct socket *so;
	struct tcpcb *tp, *ltp;
	lwkt_port_t port;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
	struct sockaddr_in sin_faddr;
	struct sockaddr_in6 sin6_faddr;
	struct sockaddr *faddr;

	KASSERT(m->m_flags & M_HASH, ("mbuf has no hash"));

	if (isipv6) {
		faddr = (struct sockaddr *)&sin6_faddr;
		sin6_faddr.sin6_family = AF_INET6;
		sin6_faddr.sin6_len = sizeof(sin6_faddr);
		sin6_faddr.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6_faddr.sin6_port = sc->sc_inc.inc_fport;
		sin6_faddr.sin6_flowinfo = sin6_faddr.sin6_scope_id = 0;
	} else {
		faddr = (struct sockaddr *)&sin_faddr;
		sin_faddr.sin_family = AF_INET;
		sin_faddr.sin_len = sizeof(sin_faddr);
		sin_faddr.sin_addr = sc->sc_inc.inc_faddr;
		sin_faddr.sin_port = sc->sc_inc.inc_fport;
		bzero(sin_faddr.sin_zero, sizeof(sin_faddr.sin_zero));
	}

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 *
	 * Set the protocol processing port for the socket to the current
	 * port (that the connection came in on).
	 *
	 * NOTE:
	 * We don't keep a reference on the new socket, since its
	 * destruction will run in this thread (netisrN); there is no
	 * race here.
	 */
	so = sonewconn_faddr(lso, SS_ISCONNECTED, faddr,
	    FALSE /* don't ref */);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort;
	}

	/*
	 * Insert new socket into hash list.
	 */
	inp = so->so_pcb;
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
	if (isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		KASSERT(INP_ISIPV4(inp), ("not inet pcb"));
		inp->inp_laddr = sc->sc_inc.inc_laddr;
	}
	inp->inp_lport = sc->sc_inc.inc_lport;

	linp = lso->so_pcb;
	ltp = intotcpcb(linp);

	tcp_pcbport_insert(ltp, inp);

#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(linp->inp_sp, inp->inp_sp))
		kprintf("syncache_expand: could not copy policy\n");
#endif
	if (isipv6) {
		struct in6_addr laddr6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= linp->inp_flags & INP_CONTROLOPTS;
		if (linp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(linp->in6p_outputopts, M_INTWAIT);
		inp->in6p_route = sc->sc_route6;
		sc->sc_route6.ro_rt = NULL;

		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, faddr, &thread0)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		port = tcp6_addrport();
	} else {
		struct in_addr laddr;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}
		inp->inp_route = sc->sc_route;
		sc->sc_route.ro_rt = NULL;

		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, faddr, &thread0)) {
			inp->inp_laddr = laddr;
			goto abort;
		}

		inp->inp_flags |= INP_HASH;
		inp->inp_hashval = m->m_pkthdr.hash;
		port = netisr_hashport(inp->inp_hashval);
	}

	/*
	 * The current port should be in the context of the SYN+ACK and
	 * so should match the tcp address port.
	 */
	KASSERT(port == &curthread->td_msgport,
	    ("TCP PORT MISMATCH %p vs %p\n", port, &curthread->td_msgport));

	tp = intotcpcb(inp);
	TCP_STATE_CHANGE(tp, TCPS_SYN_RECEIVED);
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wnd = sc->sc_sndwnd;
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH | TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE | TF_RCVD_SCALE;
		tp->snd_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP | TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_SACK_PERMITTED)
		tp->t_flags |= TF_SACK_PERMITTED;

#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif /* TCP_SIGNATURE */

	tp->t_rxtsyn = sc->sc_rxtused;
	tcp_rmx_init(tp, sc->sc_peer_mss);

	/*
	 * Inherit some properties from the listen socket
	 */
	tp->t_keepinit = ltp->t_keepinit;
	tp->t_keepidle = ltp->t_keepidle;
	tp->t_keepintvl = ltp->t_keepintvl;
	tp->t_keepcnt = ltp->t_keepcnt;
	tp->t_maxidle = ltp->t_maxidle;

	tcp_create_timermsg(tp, port);
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	if (so != NULL)
		soabort_direct(so);
	return (NULL);
}
/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcphdr *th, struct socket **sop,
    struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST | TH_ACK);
#endif
		m_freem(m);	/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else {
		tcpstat.tcps_sc_completed++;
	}
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}
/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct socket *so, struct mbuf *m)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct tcpcb *tp;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	int win;

	KASSERT(m->m_flags & M_HASH, ("mbuf has no hash"));

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * The syncache should be re-initialized with the contents
	 * of the new SYN which may have different options.
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		KASSERT(sc->sc_flags & SCF_HASH, ("syncache has no hash"));
		KASSERT(sc->sc_hashval == m->m_pkthdr.hash,
		    ("syncache/mbuf hash mismatches"));

		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;

		/* Just update the TOF_SACK_PERMITTED for now. */
		if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
			sc->sc_flags |= SCF_SACK_PERMITTED;
		else
			sc->sc_flags &= ~SCF_SACK_PERMITTED;

		/* Update initial send window */
		sc->sc_sndwnd = th->th_win;

		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			TAILQ_REMOVE(&syncache_percpu->timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			syncache_timeout(syncache_percpu, sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		return (1);
	}

	/*
	 * Fill in the syncache values.
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK | M_ZERO);
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = tp;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = SCF_HASH;
	sc->sc_hashval = m->m_pkthdr.hash;
	sc->sc_peer_mss = (to->to_flags & TOF_MSS) ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = karc4random();

	/* Initial receive window: clip ssb_space to [0 .. TCP_MAXWIN] */
	win = ssb_space(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = TCP_MIN_WINSHIFT;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.ssb_hiwat) {
				wscale++;
			}
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
		sc->sc_flags |= SCF_SACK_PERMITTED;
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags |= SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If the listening socket requested TCP digests and the received
	 * SYN contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif /* TCP_SIGNATURE */
	sc->sc_sndwnd = th->th_win;

	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	return (1);
}
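/*
 * Worked example of the window-scale computation in syncache_add()
 * above (assuming TCP_MIN_WINSHIFT is 0 and TCP_MAXWIN is 65535):
 * for a receive buffer with ssb_hiwat == 262144, wscale advances
 * 0 -> 1 -> 2 -> 3 and stops once 65535 << 3 == 524280 covers the
 * buffer, so a shift of 3 is requested in the SYN,ACK.
 */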
static int
syncache_respond(struct syncache *sc, struct mbuf *m)
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct rtentry *rt;
	struct tcphdr *th;
	struct ip6_hdr *ip6 = NULL;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (isipv6) {
		rt = tcp_rtlookup6(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
		else
			mssopt = tcp_v6mssdflt;
		hlen = sizeof(struct ip6_hdr);
	} else {
		rt = tcp_rtlookup(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip) + sizeof(struct tcphdr));
		else
			mssopt = tcp_mssdflt;
		hlen = sizeof(struct ip);
	}

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_SACK_PERMITTED) ?
			TCPOLEN_SACK_PERMITTED_ALIGNED : 0);
#ifdef TCP_SIGNATURE
		optlen += ((sc->sc_flags & SCF_SIGNATURE) ?
		    (TCPOLEN_SIGNATURE + 2) : 0);
#endif /* TCP_SIGNATURE */
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	if (tcp_prio_synack)
		m->m_flags |= M_PRIO;

	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else {
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery &&
		    ((sc->sc_flags & SCF_UNREACH) == 0)) {
			ip->ip_off |= IP_DF;
		}

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN | TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen == 0)
		goto no_options;
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

#ifdef TCP_SIGNATURE
	/*
	 * Handle TCP-MD5 passive opener response.
	 */
	if (sc->sc_flags & SCF_SIGNATURE) {
		u_int8_t *bp = optp;
		int i;

		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		tcpsignature_compute(m, 0, optlen,
		    optp + 2, IPSEC_DIR_OUTBOUND);
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optp += TCPOLEN_SIGNATURE + 2;
	}
#endif /* TCP_SIGNATURE */

	if (sc->sc_flags & SCF_SACK_PERMITTED) {
		*((u_int32_t *)optp) = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
		optp += TCPOLEN_SACK_PERMITTED_ALIGNED;
	}

no_options:
	if (isipv6) {
		struct route_in6 *ro6 = &sc->sc_route6;

		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL,
		    ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else {
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr) + optlen;
		KASSERT(sc->sc_flags & SCF_HASH, ("syncache has no hash"));
		m_sethash(m, sc->sc_hashval);
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route,
		    IP_DEBUGROUTE, NULL, sc->sc_tp->t_inpcb);
	}
	return (error);
}
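/*
 * Option-length example for syncache_respond() above, using the
 * standard lengths (TCPOLEN_MAXSEG 4, aligned window scale 4,
 * TCPOLEN_TSTAMP_APPA 12, TCPOLEN_SACK_PERMITTED_ALIGNED 4):
 * a SYN,ACK advertising MSS, window scaling, timestamps and SACK
 * carries 4 + 4 + 12 + 4 == 24 option bytes, so an IPv4 reply is
 * 20 (IP) + 20 (TCP) + 24 == 64 bytes before link-level headers.
 */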
/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t ts_secbits[4];
	u_int ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = karc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = NELEM(tcp_msstab) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;	/* peer's iss */
	MD5Init(&syn_ctx);
	if (isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else {
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}
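/*
 * Cookie arithmetic, mirroring the layering diagram further above:
 * with SYNCOOKIE_WNDBITS == 5, the low 5 bits of the decoded data
 * word select one of 32 rotating secrets, the next 2 bits index
 * tcp_msstab[], and the remaining 25 bits must match the MD5 output
 * or syncookie_lookup() below rejects the cookie.  SYNCOOKIE_TIMEOUT
 * works out to hz * 32 / 2 ticks, i.e. the roughly 16 second lifetime
 * promised above.
 */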
static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct tcphdr *th, struct socket *so)
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if (data & ~SYNCOOKIE_DATAMASK)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK | M_ZERO);
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = ssb_space(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}