/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *   This product includes software developed by Jeffrey M. Hsu.
 *
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>

#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netproto/key/key.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#include <netproto/ipsec/key.h>
#define	IPSEC
#endif /*FAST_IPSEC*/

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 4 retransmits corresponds to a timeout of (3 + 3 + 3 + 3 + 3 == 15) seconds
 * or (1 + 1 + 2 + 4 + 8 == 16) seconds if RFC6298 is used; the odds are that
 * the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS	4
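
/*
 * Note (illustrative): slot values 0..SYNCACHE_MAXREXMTS index the per-cpu
 * timer queues and the backoff schedule used by syncache_rto() below; an
 * entry is therefore transmitted at most SYNCACHE_MAXREXMTS + 1 times (the
 * initial SYN,ACK plus four retransmits) before syncache_timer_handler()
 * expires it.
 */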

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct netmsg_sc_timer {
	struct netmsg_base base;
	struct msgrec *nm_mrec;		/* back pointer to containing msgrec */
};

struct msgrec {
	struct netmsg_sc_timer msg;
	lwkt_port_t port;		/* constant after init */
	int slot;			/* constant after init */
};

static void syncache_timer_handler(netmsg_t);

struct tcp_syncache {
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
};
static struct tcp_syncache tcp_syncache;

TAILQ_HEAD(syncache_list, syncache);

struct tcp_syncache_percpu {
	struct syncache_head	*hashbase;
	u_int			cache_count;
	struct syncache_list	timerq[SYNCACHE_MAXREXMTS + 1];
	struct callout		tt_timerq[SYNCACHE_MAXREXMTS + 1];
	struct msgrec		mrec[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache_percpu tcp_syncache_percpu[MAXCPU];

static struct lwkt_port syncache_null_rport;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

/* XXX JH */
#if 0
SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");
#endif

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

static __inline int
syncache_rto(int slot)
{
	if (tcp_low_rtobase)
		return (TCPTV_RTOBASE * tcp_syn_backoff_low[slot]);
	else
		return (TCPTV_RTOBASE * tcp_syn_backoff[slot]);
}
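
/*
 * Illustrative: syncache_rto(slot) scales TCPTV_RTOBASE by the per-slot
 * SYN backoff table, which yields the 3,3,3,3,3 second schedule noted
 * above, or the RFC6298-style 1,1,2,4,8 second schedule when
 * tcp_low_rtobase is enabled (assuming the default backoff tables).
 */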

static __inline void
syncache_timeout(struct tcp_syncache_percpu *syncache_percpu,
    struct syncache *sc, int slot)
{
	int rto;

	if (slot > 0) {
		/*
		 * Record the time that we spent in SYN|ACK
		 * retransmission.
		 *
		 * Needed by RFC3390 and RFC6298.
		 */
		sc->sc_rxtused += syncache_rto(slot - 1);
	}
	sc->sc_rxtslot = slot;

	rto = syncache_rto(slot);
	sc->sc_rxttime = ticks + rto;

	TAILQ_INSERT_TAIL(&syncache_percpu->timerq[slot], sc, sc_timerq);
	if (!callout_active(&syncache_percpu->tt_timerq[slot])) {
		callout_reset(&syncache_percpu->tt_timerq[slot], rto,
		    syncache_timer, &syncache_percpu->mrec[slot]);
	}
}

static void
syncache_free(struct syncache *sc)
{
	struct rtentry *rt;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (sc->sc_ipopts)
		m_free(sc->sc_ipopts);

	rt = isipv6 ? sc->sc_route6.ro_rt : sc->sc_route.ro_rt;
	if (rt != NULL) {
		/*
		 * If this is the only reference to a protocol-cloned
		 * route, remove it immediately.
		 */
		if ((rt->rt_flags & RTF_WASCLONED) && rt->rt_refcnt == 1)
			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, NULL);
		RTFREE(rt);
	}
	kfree(sc, M_SYNCACHE);
}

void
syncache_init(void)
{
	int i, cpu;

	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = karc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		kprintf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	lwkt_initport_replyonly_null(&syncache_null_rport);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct tcp_syncache_percpu *syncache_percpu;

		syncache_percpu = &tcp_syncache_percpu[cpu];
		/* Allocate the hash table. */
		syncache_percpu->hashbase = kmalloc(tcp_syncache.hashsize *
		    sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK);

		/* Initialize the hash buckets. */
		for (i = 0; i < tcp_syncache.hashsize; i++) {
			struct syncache_head *bucket;

			bucket = &syncache_percpu->hashbase[i];
			TAILQ_INIT(&bucket->sch_bucket);
			bucket->sch_length = 0;
		}

		for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
			/* Initialize the timer queues. */
			TAILQ_INIT(&syncache_percpu->timerq[i]);
			callout_init_mp(&syncache_percpu->tt_timerq[i]);

			syncache_percpu->mrec[i].slot = i;
			syncache_percpu->mrec[i].port = netisr_cpuport(cpu);
			syncache_percpu->mrec[i].msg.nm_mrec =
			    &syncache_percpu->mrec[i];
			netmsg_init(&syncache_percpu->mrec[i].msg.base,
			    NULL, &syncache_null_rport,
			    0, syncache_timer_handler);
		}
	}
}
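
/*
 * Insert an entry into this cpu's syncache, evicting an older entry if
 * either the bucket or the overall per-cpu cache is full.  All syncache
 * state is per-cpu and is only touched from the owning protocol thread,
 * so no locking is required here.
 */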
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc2;
	int i;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (syncache_percpu->cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&syncache_percpu->timerq[i]);
			while (sc2 && (sc2->sc_flags & SCF_MARKER))
				sc2 = TAILQ_NEXT(sc2, sc_timerq);
			if (sc2 != NULL)
				break;
		}
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	syncache_timeout(syncache_percpu, sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	syncache_percpu->cache_count++;
	tcpstat.tcps_sc_added++;
}

void
syncache_destroy(struct tcpcb *tp)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache_head *bucket;
	struct syncache *sc;
	int i;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
	sc = NULL;

	for (i = 0; i < tcp_syncache.hashsize; i++) {
		bucket = &syncache_percpu->hashbase[i];
		TAILQ_FOREACH(sc, &bucket->sch_bucket, sc_hash) {
			if (sc->sc_tp == tp)
				sc->sc_tp = NULL;
		}
	}
}

static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	if (sch == NULL) {
		if (isipv6) {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	syncache_percpu->cache_count--;

	/*
	 * Cleanup
	 */
	sc->sc_tp = NULL;

	/*
	 * Remove the entry from the syncache timer/timeout queue.  Note
	 * that we do not try to stop any running timer since we do not know
	 * whether the timer's message is in-transit or not.  Since timeouts
	 * are fairly long, taking an unneeded callout does not detrimentally
	 * affect performance.
	 */
	TAILQ_REMOVE(&syncache_percpu->timerq[sc->sc_rxtslot], sc, sc_timerq);

	syncache_free(sc);
}

/*
 * Place a timeout message on the TCP thread's message queue.
 * This routine runs in soft interrupt context.
 *
 * An invariant: for this routine to be called, the callout must
 * have been active.  Note that the callout is not deactivated until
 * after the message has been processed in syncache_timer_handler() below.
 */
static void
syncache_timer(void *p)
{
	struct netmsg_sc_timer *msg = p;

	lwkt_sendmsg(msg->nm_mrec->port, &msg->base.lmsg);
}
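
/*
 * Sketch of the timer flow (assuming the standard LWKT semantics): the
 * callout fires in soft interrupt context, syncache_timer() forwards the
 * preinitialized netmsg to the owning protocol thread, and the reply to
 * syncache_null_rport in syncache_timer_handler() simply marks the
 * message done so it can be sent again by a later callout.
 */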

/*
 * Service a timer message queued by timer expiration.
 * This routine runs in the TCP protocol thread.
 *
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 *
 * When we finish processing timed-out entries, we restart the timer if there
 * are any entries still on the queue and deactivate it otherwise.  Only after
 * a timer has been deactivated here can it be restarted by syncache_timeout().
 */
static void
syncache_timer_handler(netmsg_t msg)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc;
	struct syncache marker;
	struct syncache_list *list;
	struct inpcb *inp;
	int slot;

	slot = ((struct netmsg_sc_timer *)msg)->nm_mrec->slot;
	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];

	list = &syncache_percpu->timerq[slot];

	/*
	 * Use a marker to keep our place in the scan.  syncache_drop()
	 * can block and cause any next pointer we cache to become stale.
	 */
	marker.sc_flags = SCF_MARKER;
	TAILQ_INSERT_HEAD(list, &marker, sc_timerq);

	while ((sc = TAILQ_NEXT(&marker, sc_timerq)) != NULL) {
		/*
		 * Move the marker.
		 */
		TAILQ_REMOVE(list, &marker, sc_timerq);
		TAILQ_INSERT_AFTER(list, sc, &marker, sc_timerq);

		if (sc->sc_flags & SCF_MARKER)
			continue;

		if (ticks < sc->sc_rxttime)
			break;	/* finished because timerq sorted by time */
		if (sc->sc_tp == NULL) {
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL ||
		    inp->inp_gencnt != sc->sc_inp_gencnt) {
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next entry
		 * on the timer chain until it has completed.
		 */
		syncache_respond(sc, NULL);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(list, sc, sc_timerq);
		syncache_timeout(syncache_percpu, sc, slot + 1);
	}
	TAILQ_REMOVE(list, &marker, sc_timerq);

	if (sc != NULL) {
		callout_reset(&syncache_percpu->tt_timerq[slot],
		    sc->sc_rxttime - ticks, syncache_timer,
		    &syncache_percpu->mrec[slot]);
	} else {
		callout_deactivate(&syncache_percpu->tt_timerq[slot]);
	}
	lwkt_replymsg(&msg->base.lmsg, 0);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc;
	struct syncache_head *sch;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
	} else
#endif
	{
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}
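
/*
 * Note: a given {laddr, lport, faddr, fport} tuple is always dispatched
 * to the same protocol thread, so the per-cpu table probed here is the
 * same one the entry was inserted into by syncache_insert().
 */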

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		return;
	}
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 *
 * This is called from the context of the SYN+ACK.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL, *linp;
	struct socket *so;
	struct tcpcb *tp, *ltp;
	lwkt_port_t port;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
	struct sockaddr_in sin_faddr;
	struct sockaddr_in6 sin6_faddr;
	struct sockaddr *faddr;

	if (isipv6) {
		faddr = (struct sockaddr *)&sin6_faddr;
		sin6_faddr.sin6_family = AF_INET6;
		sin6_faddr.sin6_len = sizeof(sin6_faddr);
		sin6_faddr.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6_faddr.sin6_port = sc->sc_inc.inc_fport;
		sin6_faddr.sin6_flowinfo = sin6_faddr.sin6_scope_id = 0;
	} else {
		faddr = (struct sockaddr *)&sin_faddr;
		sin_faddr.sin_family = AF_INET;
		sin_faddr.sin_len = sizeof(sin_faddr);
		sin_faddr.sin_addr = sc->sc_inc.inc_faddr;
		sin_faddr.sin_port = sc->sc_inc.inc_fport;
		bzero(sin_faddr.sin_zero, sizeof(sin_faddr.sin_zero));
	}

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 *
	 * Set the protocol processing port for the socket to the current
	 * port (that the connection came in on).
	 */
	so = sonewconn_faddr(lso, SS_ISCONNECTED, faddr);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort;
	}

	/*
	 * Insert new socket into hash list.
	 */
	inp = so->so_pcb;
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
	if (isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
#ifdef INET6
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
		inp->inp_flags &= ~IN6P_IPV6_V6ONLY;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
	}
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinsporthash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
		if (isipv6)
			inp->in6p_laddr = kin6addr_any;
		else
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
	linp = lso->so_pcb;
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(linp->inp_sp, inp->inp_sp))
		kprintf("syncache_expand: could not copy policy\n");
#endif
	if (isipv6) {
		struct in6_addr laddr6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= linp->inp_flags & INP_CONTROLOPTS;
		if (linp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(linp->in6p_outputopts, M_INTWAIT);
		inp->in6p_route = sc->sc_route6;
		sc->sc_route6.ro_rt = NULL;

		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, faddr, &thread0)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
	} else {
		struct in_addr laddr;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}
		inp->inp_route = sc->sc_route;
		sc->sc_route.ro_rt = NULL;

		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, faddr, &thread0)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}

	/*
	 * The current port should be in the context of the SYN+ACK and
	 * so should match the tcp address port.
	 */
	if (isipv6) {
		port = tcp6_addrport();
	} else {
		port = tcp_addrport(inp->inp_faddr.s_addr, inp->inp_fport,
		    inp->inp_laddr.s_addr, inp->inp_lport);
	}
	KASSERT(port == &curthread->td_msgport,
	    ("TCP PORT MISMATCH %p vs %p\n", port, &curthread->td_msgport));

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wnd = sc->sc_sndwnd;
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH | TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE | TF_RCVD_SCALE;
		tp->snd_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP | TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_SACK_PERMITTED)
		tp->t_flags |= TF_SACK_PERMITTED;

#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif /* TCP_SIGNATURE */

	tp->t_rxtsyn = sc->sc_rxtused;
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * Inherit some properties from the listen socket
	 */
	ltp = intotcpcb(linp);
	tp->t_keepinit = ltp->t_keepinit;
	tp->t_keepidle = ltp->t_keepidle;
	tp->t_keepintvl = ltp->t_keepintvl;
	tp->t_keepcnt = ltp->t_keepcnt;
	tp->t_maxidle = ltp->t_maxidle;

	tcp_create_timermsg(tp, port);
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	if (so != NULL)
		soabort_oncpu(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcphdr *th, struct socket **sop,
    struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}
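
	/*
	 * Our SYN,ACK carried seq == sc_iss and the SYN itself consumes
	 * one sequence number, so an ACK that completes the three-way
	 * handshake must acknowledge exactly sc_iss + 1.
	 */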

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST | TH_ACK);
#endif
		m_freem(m);		/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else {
		tcpstat.tcps_sc_completed++;
	}
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct socket *so, struct mbuf *m)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct tcpcb *tp;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	int win;

	syncache_percpu = &tcp_syncache_percpu[mycpu->gd_cpuid];
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * The syncache should be re-initialized with the contents
	 * of the new SYN which may have different options.
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;

		/* Just update the TOF_SACK_PERMITTED for now. */
		if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
			sc->sc_flags |= SCF_SACK_PERMITTED;
		else
			sc->sc_flags &= ~SCF_SACK_PERMITTED;

		/* Update initial send window */
		sc->sc_sndwnd = th->th_win;

		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			TAILQ_REMOVE(&syncache_percpu->timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			syncache_timeout(syncache_percpu, sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		return (1);
	}

	/*
	 * Fill in the syncache values.
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK|M_ZERO);
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = tp;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = karc4random();

	/* Initial receive window: clip ssb_space to [0 .. TCP_MAXWIN] */
	win = ssb_space(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = TCP_MIN_WINSHIFT;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.ssb_hiwat) {
				wscale++;
			}
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
		sc->sc_flags |= SCF_SACK_PERMITTED;
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif /* TCP_SIGNATURE */
	sc->sc_sndwnd = th->th_win;

	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	return (1);
}

static int
syncache_respond(struct syncache *sc, struct mbuf *m)
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct rtentry *rt;
	struct tcphdr *th;
	struct ip6_hdr *ip6 = NULL;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (isipv6) {
		rt = tcp_rtlookup6(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
		else
			mssopt = tcp_v6mssdflt;
		hlen = sizeof(struct ip6_hdr);
	} else {
		rt = tcp_rtlookup(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip) + sizeof(struct tcphdr));
		else
			mssopt = tcp_mssdflt;
		hlen = sizeof(struct ip);
	}
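
	/*
	 * Illustrative: for a route over standard Ethernet (if_mtu == 1500)
	 * the advertised MSS works out to 1500 - 40 == 1460 for IPv4 and
	 * 1500 - 60 == 1440 for IPv6.
	 */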

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_SACK_PERMITTED) ?
			TCPOLEN_SACK_PERMITTED_ALIGNED : 0);
#ifdef TCP_SIGNATURE
		optlen += ((sc->sc_flags & SCF_SIGNATURE) ?
		    (TCPOLEN_SIGNATURE + 2) : 0);
#endif /* TCP_SIGNATURE */
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else {
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery
		    && ((sc->sc_flags & SCF_UNREACH) == 0)) {
			ip->ip_off |= IP_DF;
		}

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN | TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen == 0)
		goto no_options;
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}
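
	/*
	 * Note: TCPOPT_TSTAMP_HDR is the RFC 1323 appendix A layout
	 * <NOP,NOP,TIMESTAMP,TCPOLEN_TIMESTAMP>, which pads the 10 byte
	 * option up to the 12 byte aligned size (TCPOLEN_TSTAMP_APPA)
	 * accounted for in optlen above.
	 */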

#ifdef TCP_SIGNATURE
	/*
	 * Handle TCP-MD5 passive opener response.
	 */
	if (sc->sc_flags & SCF_SIGNATURE) {
		u_int8_t *bp = optp;
		int i;

		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		tcpsignature_compute(m, 0, optlen,
		    optp + 2, IPSEC_DIR_OUTBOUND);
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optp += TCPOLEN_SIGNATURE + 2;
	}
#endif /* TCP_SIGNATURE */

	if (sc->sc_flags & SCF_SACK_PERMITTED) {
		*((u_int32_t *)optp) = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
		optp += TCPOLEN_SACK_PERMITTED_ALIGNED;
	}

no_options:
	if (isipv6) {
		struct route_in6 *ro6 = &sc->sc_route6;

		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL,
		    ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else {
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr) + optlen;
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route,
		    IP_DEBUGROUTE, NULL, sc->sc_tp->t_inpcb);
	}
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is in flight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */
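
/*
 * Worked example of the layout above (illustrative values): with
 * SYNCOOKIE_WNDBITS == 5, a secret index of 9 and a peer mss index of 2
 * (tcp_msstab[2] == 1460) pack into the low 7 bits as (2 << 5) | 9 == 0x49.
 * The word is then XORed with the peer's ISS and with the upper 27 bits of
 * the MD5 digest, and the result becomes our ISS.  syncookie_lookup()
 * reverses the mixing and rejects any cookie whose decoded high bits are
 * not zero.
 */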
static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = karc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = NELEM(tcp_msstab) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
	if (isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else {
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct tcphdr *th, struct socket *so)
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if (data & ~SYNCOOKIE_DATAMASK)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;
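	/* data now holds the peer mss index packed by syncookie_generate() */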

	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK|M_ZERO);
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = ssb_space(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}