/*-
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 * $DragonFly: src/sys/netinet/tcp_syncache.c,v 1.11 2004/03/04 01:02:05 hsu Exp $
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netproto/key/key.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <vm/vm_zone.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	syncache_drop(struct syncache *, struct syncache_head *);
static void	syncache_free(struct syncache *);
static void	syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *);
static void	syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * Three retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3
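
/*
 * Illustrative sketch, not part of the original file: how the 15 second
 * figure above falls out of the retransmit machinery.  SYNCACHE_TIMEOUT()
 * (defined below) arms each timer slot for TCPTV_RTOBASE * tcp_backoff[slot]
 * ticks, so an entry's total lifetime is the sum over slots 0 through
 * SYNCACHE_MAXREXMTS.  The helper name is hypothetical, and the arithmetic
 * leans on the comment's premise that TCPTV_RTOBASE is one second's worth
 * of ticks and that tcp_backoff[] begins {1, 2, 4, 8, ...}.
 */
#if 0
static int
syncache_entry_lifetime(void)
{
	int slot, total = 0;

	for (slot = 0; slot <= SYNCACHE_MAXREXMTS; slot++)
		total += TCPTV_RTOBASE * tcp_backoff[slot];
	/*
	 * == (1 + 2 + 4 + 8) * TCPTV_RTOBASE ticks; 15 seconds if the
	 * base really is one second, as the comment above assumes.
	 */
	return (total);
}
#endif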
118 */ 119 #define SYNCACHE_MAXREXMTS 3 120 121 /* Arbitrary values */ 122 #define TCP_SYNCACHE_HASHSIZE 512 123 #define TCP_SYNCACHE_BUCKETLIMIT 30 124 125 struct tcp_syncache { 126 struct syncache_head *hashbase; 127 struct vm_zone *zone; 128 u_int hashsize; 129 u_int hashmask; 130 u_int bucket_limit; 131 u_int cache_count; 132 u_int cache_limit; 133 u_int rexmt_limit; 134 u_int hash_secret; 135 TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1]; 136 struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1]; 137 }; 138 static struct tcp_syncache tcp_syncache; 139 140 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache"); 141 142 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD, 143 &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache"); 144 145 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD, 146 &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache"); 147 148 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD, 149 &tcp_syncache.cache_count, 0, "Current number of entries in syncache"); 150 151 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD, 152 &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable"); 153 154 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW, 155 &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions"); 156 157 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache"); 158 159 #define SYNCACHE_HASH(inc, mask) \ 160 ((tcp_syncache.hash_secret ^ \ 161 (inc)->inc_faddr.s_addr ^ \ 162 ((inc)->inc_faddr.s_addr >> 16) ^ \ 163 (inc)->inc_fport ^ (inc)->inc_lport) & mask) 164 165 #define SYNCACHE_HASH6(inc, mask) \ 166 ((tcp_syncache.hash_secret ^ \ 167 (inc)->inc6_faddr.s6_addr32[0] ^ \ 168 (inc)->inc6_faddr.s6_addr32[3] ^ \ 169 (inc)->inc_fport ^ (inc)->inc_lport) & mask) 170 171 #define ENDPTS_EQ(a, b) ( \ 172 (a)->ie_fport == (b)->ie_fport && \ 173 (a)->ie_lport == (b)->ie_lport && \ 174 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \ 175 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \ 176 ) 177 178 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0) 179 180 #define SYNCACHE_TIMEOUT(sc, slot) do { \ 181 sc->sc_rxtslot = slot; \ 182 sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[slot]; \ 183 TAILQ_INSERT_TAIL(&tcp_syncache.timerq[slot], sc, sc_timerq); \ 184 if (!callout_active(&tcp_syncache.tt_timerq[slot])) \ 185 callout_reset(&tcp_syncache.tt_timerq[slot], \ 186 TCPTV_RTOBASE * tcp_backoff[slot], \ 187 syncache_timer, (void *)((intptr_t)slot)); \ 188 } while (0) 189 190 static void 191 syncache_free(struct syncache *sc) 192 { 193 struct rtentry *rt; 194 195 if (sc->sc_ipopts) 196 (void) m_free(sc->sc_ipopts); 197 #ifdef INET6 198 if (sc->sc_inc.inc_isipv6) 199 rt = sc->sc_route6.ro_rt; 200 else 201 #endif 202 rt = sc->sc_route.ro_rt; 203 if (rt != NULL) { 204 /* 205 * If this is the only reference to a protocol cloned 206 * route, remove it immediately. 
207 */ 208 if (rt->rt_flags & RTF_WASCLONED && 209 (sc->sc_flags & SCF_KEEPROUTE) == 0 && 210 rt->rt_refcnt == 1) 211 rtrequest(RTM_DELETE, rt_key(rt), 212 rt->rt_gateway, rt_mask(rt), 213 rt->rt_flags, NULL); 214 RTFREE(rt); 215 } 216 zfree(tcp_syncache.zone, sc); 217 } 218 219 void 220 syncache_init(void) 221 { 222 int i; 223 224 tcp_syncache.cache_count = 0; 225 tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE; 226 tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT; 227 tcp_syncache.cache_limit = 228 tcp_syncache.hashsize * tcp_syncache.bucket_limit; 229 tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS; 230 tcp_syncache.hash_secret = arc4random(); 231 232 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize", 233 &tcp_syncache.hashsize); 234 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit", 235 &tcp_syncache.cache_limit); 236 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit", 237 &tcp_syncache.bucket_limit); 238 if (!powerof2(tcp_syncache.hashsize)) { 239 printf("WARNING: syncache hash size is not a power of 2.\n"); 240 tcp_syncache.hashsize = 512; /* safe default */ 241 } 242 tcp_syncache.hashmask = tcp_syncache.hashsize - 1; 243 244 /* Allocate the hash table. */ 245 MALLOC(tcp_syncache.hashbase, struct syncache_head *, 246 tcp_syncache.hashsize * sizeof(struct syncache_head), 247 M_SYNCACHE, M_WAITOK); 248 249 /* Initialize the hash buckets. */ 250 for (i = 0; i < tcp_syncache.hashsize; i++) { 251 TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket); 252 tcp_syncache.hashbase[i].sch_length = 0; 253 } 254 255 /* Initialize the timer queues. */ 256 for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) { 257 TAILQ_INIT(&tcp_syncache.timerq[i]); 258 callout_init(&tcp_syncache.tt_timerq[i]); 259 } 260 261 /* 262 * Allocate the syncache entries. Allow the zone to allocate one 263 * more entry than cache limit, so a new entry can bump out an 264 * older one. 265 */ 266 tcp_syncache.zone = zinit("syncache", sizeof(struct syncache), 267 tcp_syncache.cache_limit, ZONE_INTERRUPT, 0); 268 tcp_syncache.cache_limit -= 1; 269 } 270 271 static void 272 syncache_insert(sc, sch) 273 struct syncache *sc; 274 struct syncache_head *sch; 275 { 276 struct syncache *sc2; 277 int i; 278 279 /* 280 * Make sure that we don't overflow the per-bucket 281 * limit or the total cache size limit. 282 */ 283 if (sch->sch_length >= tcp_syncache.bucket_limit) { 284 /* 285 * The bucket is full, toss the oldest element. 286 */ 287 sc2 = TAILQ_FIRST(&sch->sch_bucket); 288 sc2->sc_tp->ts_recent = ticks; 289 syncache_drop(sc2, sch); 290 tcpstat.tcps_sc_bucketoverflow++; 291 } else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) { 292 /* 293 * The cache is full. Toss the oldest entry in the 294 * entire cache. This is the front entry in the 295 * first non-empty timer queue with the largest 296 * timeout value. 297 */ 298 for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) { 299 sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]); 300 if (sc2 != NULL) 301 break; 302 } 303 sc2->sc_tp->ts_recent = ticks; 304 syncache_drop(sc2, NULL); 305 tcpstat.tcps_sc_cacheoverflow++; 306 } 307 308 /* Initialize the entry's timer. */ 309 SYNCACHE_TIMEOUT(sc, 0); 310 311 /* Put it into the bucket. 
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;
	int s;

	s = splnet();
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		splx(s);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
		(void) syncache_respond(sc, NULL);
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	splx(s);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
441 */ 442 void 443 syncache_chkrst(inc, th) 444 struct in_conninfo *inc; 445 struct tcphdr *th; 446 { 447 struct syncache *sc; 448 struct syncache_head *sch; 449 450 sc = syncache_lookup(inc, &sch); 451 if (sc == NULL) 452 return; 453 /* 454 * If the RST bit is set, check the sequence number to see 455 * if this is a valid reset segment. 456 * RFC 793 page 37: 457 * In all states except SYN-SENT, all reset (RST) segments 458 * are validated by checking their SEQ-fields. A reset is 459 * valid if its sequence number is in the window. 460 * 461 * The sequence number in the reset segment is normally an 462 * echo of our outgoing acknowlegement numbers, but some hosts 463 * send a reset with the sequence number at the rightmost edge 464 * of our receive window, and we have to handle this case. 465 */ 466 if (SEQ_GEQ(th->th_seq, sc->sc_irs) && 467 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) { 468 syncache_drop(sc, sch); 469 tcpstat.tcps_sc_reset++; 470 } 471 } 472 473 void 474 syncache_badack(inc) 475 struct in_conninfo *inc; 476 { 477 struct syncache *sc; 478 struct syncache_head *sch; 479 480 sc = syncache_lookup(inc, &sch); 481 if (sc != NULL) { 482 syncache_drop(sc, sch); 483 tcpstat.tcps_sc_badack++; 484 } 485 } 486 487 void 488 syncache_unreach(inc, th) 489 struct in_conninfo *inc; 490 struct tcphdr *th; 491 { 492 struct syncache *sc; 493 struct syncache_head *sch; 494 495 /* we are called at splnet() here */ 496 sc = syncache_lookup(inc, &sch); 497 if (sc == NULL) 498 return; 499 500 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */ 501 if (ntohl(th->th_seq) != sc->sc_iss) 502 return; 503 504 /* 505 * If we've rertransmitted 3 times and this is our second error, 506 * we remove the entry. Otherwise, we allow it to continue on. 507 * This prevents us from incorrectly nuking an entry during a 508 * spurious network outage. 509 * 510 * See tcp_notify(). 511 */ 512 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) { 513 sc->sc_flags |= SCF_UNREACH; 514 return; 515 } 516 syncache_drop(sc, sch); 517 tcpstat.tcps_sc_unreach++; 518 } 519 520 /* 521 * Build a new TCP socket structure from a syncache entry. 522 */ 523 static struct socket * 524 syncache_socket(sc, lso) 525 struct syncache *sc; 526 struct socket *lso; 527 { 528 struct inpcb *inp = NULL; 529 struct socket *so; 530 struct tcpcb *tp; 531 532 /* 533 * Ok, create the full blown connection, and set things up 534 * as they would have been set up if we had created the 535 * connection when the SYN arrived. If we can't create 536 * the connection, abort it. 537 */ 538 so = sonewconn(lso, SS_ISCONNECTED); 539 if (so == NULL) { 540 /* 541 * Drop the connection; we will send a RST if the peer 542 * retransmits the ACK, 543 */ 544 tcpstat.tcps_listendrop++; 545 goto abort; 546 } 547 548 inp = sotoinpcb(so); 549 550 /* 551 * Insert new socket into hash list. 552 */ 553 inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6; 554 #ifdef INET6 555 if (sc->sc_inc.inc_isipv6) { 556 inp->in6p_laddr = sc->sc_inc.inc6_laddr; 557 } else { 558 inp->inp_vflag &= ~INP_IPV6; 559 inp->inp_vflag |= INP_IPV4; 560 #endif 561 inp->inp_laddr = sc->sc_inc.inc_laddr; 562 #ifdef INET6 563 } 564 #endif 565 inp->inp_lport = sc->sc_inc.inc_lport; 566 if (in_pcbinsporthash(inp) != 0) { 567 /* 568 * Undo the assignments above if we failed to 569 * put the PCB on the hash lists. 
570 */ 571 #ifdef INET6 572 if (sc->sc_inc.inc_isipv6) 573 inp->in6p_laddr = in6addr_any; 574 else 575 #endif 576 inp->inp_laddr.s_addr = INADDR_ANY; 577 inp->inp_lport = 0; 578 goto abort; 579 } 580 #ifdef IPSEC 581 /* copy old policy into new socket's */ 582 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp)) 583 printf("syncache_expand: could not copy policy\n"); 584 #endif 585 #ifdef INET6 586 if (sc->sc_inc.inc_isipv6) { 587 struct inpcb *oinp = sotoinpcb(lso); 588 struct in6_addr laddr6; 589 struct sockaddr_in6 sin6; 590 /* 591 * Inherit socket options from the listening socket. 592 * Note that in6p_inputopts are not (and should not be) 593 * copied, since it stores previously received options and is 594 * used to detect if each new option is different than the 595 * previous one and hence should be passed to a user. 596 * If we copied in6p_inputopts, a user would not be able to 597 * receive options just after calling the accept system call. 598 */ 599 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS; 600 if (oinp->in6p_outputopts) 601 inp->in6p_outputopts = 602 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT); 603 inp->in6p_route = sc->sc_route6; 604 sc->sc_route6.ro_rt = NULL; 605 606 sin6.sin6_family = AF_INET6; 607 sin6.sin6_len = sizeof sin6; 608 sin6.sin6_addr = sc->sc_inc.inc6_faddr; 609 sin6.sin6_port = sc->sc_inc.inc_fport; 610 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0; 611 laddr6 = inp->in6p_laddr; 612 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) 613 inp->in6p_laddr = sc->sc_inc.inc6_laddr; 614 if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, &thread0)) { 615 inp->in6p_laddr = laddr6; 616 goto abort; 617 } 618 } else 619 #endif 620 { 621 struct in_addr laddr; 622 struct sockaddr_in sin; 623 624 inp->inp_options = ip_srcroute(); 625 if (inp->inp_options == NULL) { 626 inp->inp_options = sc->sc_ipopts; 627 sc->sc_ipopts = NULL; 628 } 629 inp->inp_route = sc->sc_route; 630 sc->sc_route.ro_rt = NULL; 631 632 sin.sin_family = AF_INET; 633 sin.sin_len = sizeof sin; 634 sin.sin_addr = sc->sc_inc.inc_faddr; 635 sin.sin_port = sc->sc_inc.inc_fport; 636 bzero(sin.sin_zero, sizeof sin.sin_zero); 637 laddr = inp->inp_laddr; 638 if (inp->inp_laddr.s_addr == INADDR_ANY) 639 inp->inp_laddr = sc->sc_inc.inc_laddr; 640 if (in_pcbconnect(inp, (struct sockaddr *)&sin, &thread0)) { 641 inp->inp_laddr = laddr; 642 goto abort; 643 } 644 } 645 646 tp = intotcpcb(inp); 647 tp->t_state = TCPS_SYN_RECEIVED; 648 tp->iss = sc->sc_iss; 649 tp->irs = sc->sc_irs; 650 tcp_rcvseqinit(tp); 651 tcp_sendseqinit(tp); 652 tp->snd_wl1 = sc->sc_irs; 653 tp->rcv_up = sc->sc_irs + 1; 654 tp->rcv_wnd = sc->sc_wnd; 655 tp->rcv_adv += tp->rcv_wnd; 656 657 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY); 658 if (sc->sc_flags & SCF_NOOPT) 659 tp->t_flags |= TF_NOOPT; 660 if (sc->sc_flags & SCF_WINSCALE) { 661 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE; 662 tp->requested_s_scale = sc->sc_requested_s_scale; 663 tp->request_r_scale = sc->sc_request_r_scale; 664 } 665 if (sc->sc_flags & SCF_TIMESTAMP) { 666 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP; 667 tp->ts_recent = sc->sc_tsrecent; 668 tp->ts_recent_age = ticks; 669 } 670 if (sc->sc_flags & SCF_CC) { 671 /* 672 * Initialization of the tcpcb for transaction; 673 * set SND.WND = SEG.WND, 674 * initialize CCsend and CCrecv. 
675 */ 676 tp->t_flags |= TF_REQ_CC|TF_RCVD_CC; 677 tp->cc_send = sc->sc_cc_send; 678 tp->cc_recv = sc->sc_cc_recv; 679 } 680 681 tcp_mss(tp, sc->sc_peer_mss); 682 683 /* 684 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment. 685 */ 686 if (sc->sc_rxtslot != 0) 687 tp->snd_cwnd = tp->t_maxseg; 688 callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); 689 690 tcpstat.tcps_accepts++; 691 return (so); 692 693 abort: 694 if (so != NULL) 695 (void) soabort(so); 696 return (NULL); 697 } 698 699 /* 700 * This function gets called when we receive an ACK for a 701 * socket in the LISTEN state. We look up the connection 702 * in the syncache, and if its there, we pull it out of 703 * the cache and turn it into a full-blown connection in 704 * the SYN-RECEIVED state. 705 */ 706 int 707 syncache_expand(inc, th, sop, m) 708 struct in_conninfo *inc; 709 struct tcphdr *th; 710 struct socket **sop; 711 struct mbuf *m; 712 { 713 struct syncache *sc; 714 struct syncache_head *sch; 715 struct socket *so; 716 717 sc = syncache_lookup(inc, &sch); 718 if (sc == NULL) { 719 /* 720 * There is no syncache entry, so see if this ACK is 721 * a returning syncookie. To do this, first: 722 * A. See if this socket has had a syncache entry dropped in 723 * the past. We don't want to accept a bogus syncookie 724 * if we've never received a SYN. 725 * B. check that the syncookie is valid. If it is, then 726 * cobble up a fake syncache entry, and return. 727 */ 728 if (!tcp_syncookies) 729 return (0); 730 sc = syncookie_lookup(inc, th, *sop); 731 if (sc == NULL) 732 return (0); 733 sch = NULL; 734 tcpstat.tcps_sc_recvcookie++; 735 } 736 737 /* 738 * If seg contains an ACK, but not for our SYN/ACK, send a RST. 739 */ 740 if (th->th_ack != sc->sc_iss + 1) 741 return (0); 742 743 so = syncache_socket(sc, *sop); 744 if (so == NULL) { 745 #if 0 746 resetandabort: 747 /* XXXjlemon check this - is this correct? */ 748 (void) tcp_respond(NULL, m, m, th, 749 th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK); 750 #endif 751 m_freem(m); /* XXX only needed for above */ 752 tcpstat.tcps_sc_aborted++; 753 } else { 754 sc->sc_flags |= SCF_KEEPROUTE; 755 tcpstat.tcps_sc_completed++; 756 } 757 if (sch == NULL) 758 syncache_free(sc); 759 else 760 syncache_drop(sc, sch); 761 *sop = so; 762 return (1); 763 } 764 765 /* 766 * Given a LISTEN socket and an inbound SYN request, add 767 * this to the syn cache, and send back a segment: 768 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> 769 * to the source. 770 * 771 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN. 772 * Doing so would require that we hold onto the data and deliver it 773 * to the application. However, if we are the target of a SYN-flood 774 * DoS attack, an attacker could send data which would eventually 775 * consume all available buffer space if it were ACKed. By not ACKing 776 * the data, we avoid this DoS scenario. 777 */ 778 int 779 syncache_add(inc, to, th, sop, m) 780 struct in_conninfo *inc; 781 struct tcpopt *to; 782 struct tcphdr *th; 783 struct socket **sop; 784 struct mbuf *m; 785 { 786 struct tcpcb *tp; 787 struct socket *so; 788 struct syncache *sc = NULL; 789 struct syncache_head *sch; 790 struct mbuf *ipopts = NULL; 791 struct rmxp_tao *taop; 792 int win; 793 794 so = *sop; 795 tp = sototcpcb(so); 796 797 /* 798 * Remember the IP options, if any. 799 */ 800 #ifdef INET6 801 if (!inc->inc_isipv6) 802 #endif 803 ipopts = ip_srcroute(); 804 805 /* 806 * See if we already have an entry for this connection. 
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * Should the syncache be re-initialized with the contents
	 * of the new SYN (which may have different options) here?
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	/*
	 * This allocation is guaranteed to succeed because we
	 * preallocate one more syncache entry than cache_limit.
	 */
	sc = zalloc(tcp_syncache.zone);

	/*
	 * Fill in the syncache values.
	 */
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = arc4random();

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead fall back to a normal 3WHS via the syncache.
	 * This allows us to apply synflood protection to TAO-qualifying SYNs
	 * also.  However, there should be a heuristic to determine when to
	 * do this, but one is not present at the moment.
	 */
929 */ 930 931 /* 932 * Perform TAO test on incoming CC (SEG.CC) option, if any. 933 * - compare SEG.CC against cached CC from the same host, if any. 934 * - if SEG.CC > chached value, SYN must be new and is accepted 935 * immediately: save new CC in the cache, mark the socket 936 * connected, enter ESTABLISHED state, turn on flag to 937 * send a SYN in the next segment. 938 * A virtual advertised window is set in rcv_adv to 939 * initialize SWS prevention. Then enter normal segment 940 * processing: drop SYN, process data and FIN. 941 * - otherwise do a normal 3-way handshake. 942 */ 943 taop = tcp_gettaocache(&sc->sc_inc); 944 if ((to->to_flags & TOF_CC) != 0) { 945 if (((tp->t_flags & TF_NOPUSH) != 0) && 946 sc->sc_flags & SCF_CC && 947 taop != NULL && taop->tao_cc != 0 && 948 CC_GT(to->to_cc, taop->tao_cc)) { 949 sc->sc_rxtslot = 0; 950 so = syncache_socket(sc, *sop); 951 if (so != NULL) { 952 sc->sc_flags |= SCF_KEEPROUTE; 953 taop->tao_cc = to->to_cc; 954 *sop = so; 955 } 956 syncache_free(sc); 957 return (so != NULL); 958 } 959 } else { 960 /* 961 * No CC option, but maybe CC.NEW: invalidate cached value. 962 */ 963 if (taop != NULL) 964 taop->tao_cc = 0; 965 } 966 /* 967 * TAO test failed or there was no CC option, 968 * do a standard 3-way handshake. 969 */ 970 if (syncache_respond(sc, m) == 0) { 971 syncache_insert(sc, sch); 972 tcpstat.tcps_sndacks++; 973 tcpstat.tcps_sndtotal++; 974 } else { 975 syncache_free(sc); 976 tcpstat.tcps_sc_dropped++; 977 } 978 *sop = NULL; 979 return (1); 980 } 981 982 static int 983 syncache_respond(sc, m) 984 struct syncache *sc; 985 struct mbuf *m; 986 { 987 u_int8_t *optp; 988 int optlen, error; 989 u_int16_t tlen, hlen, mssopt; 990 struct ip *ip = NULL; 991 struct rtentry *rt; 992 struct tcphdr *th; 993 #ifdef INET6 994 struct ip6_hdr *ip6 = NULL; 995 #endif 996 997 #ifdef INET6 998 if (sc->sc_inc.inc_isipv6) { 999 rt = tcp_rtlookup6(&sc->sc_inc); 1000 if (rt != NULL) 1001 mssopt = rt->rt_ifp->if_mtu - 1002 (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)); 1003 else 1004 mssopt = tcp_v6mssdflt; 1005 hlen = sizeof(struct ip6_hdr); 1006 } else 1007 #endif 1008 { 1009 rt = tcp_rtlookup(&sc->sc_inc); 1010 if (rt != NULL) 1011 mssopt = rt->rt_ifp->if_mtu - 1012 (sizeof(struct ip) + sizeof(struct tcphdr)); 1013 else 1014 mssopt = tcp_mssdflt; 1015 hlen = sizeof(struct ip); 1016 } 1017 1018 /* Compute the size of the TCP options. */ 1019 if (sc->sc_flags & SCF_NOOPT) { 1020 optlen = 0; 1021 } else { 1022 optlen = TCPOLEN_MAXSEG + 1023 ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) + 1024 ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) + 1025 ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0); 1026 } 1027 tlen = hlen + sizeof(struct tcphdr) + optlen; 1028 1029 /* 1030 * XXX 1031 * assume that the entire packet will fit in a header mbuf 1032 */ 1033 KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small")); 1034 1035 /* 1036 * XXX shouldn't this reuse the mbuf if possible ? 1037 * Create the IP+TCP header from scratch. 
1038 */ 1039 if (m) 1040 m_freem(m); 1041 1042 m = m_gethdr(M_DONTWAIT, MT_HEADER); 1043 if (m == NULL) 1044 return (ENOBUFS); 1045 m->m_data += max_linkhdr; 1046 m->m_len = tlen; 1047 m->m_pkthdr.len = tlen; 1048 m->m_pkthdr.rcvif = NULL; 1049 1050 #ifdef INET6 1051 if (sc->sc_inc.inc_isipv6) { 1052 ip6 = mtod(m, struct ip6_hdr *); 1053 ip6->ip6_vfc = IPV6_VERSION; 1054 ip6->ip6_nxt = IPPROTO_TCP; 1055 ip6->ip6_src = sc->sc_inc.inc6_laddr; 1056 ip6->ip6_dst = sc->sc_inc.inc6_faddr; 1057 ip6->ip6_plen = htons(tlen - hlen); 1058 /* ip6_hlim is set after checksum */ 1059 /* ip6_flow = ??? */ 1060 1061 th = (struct tcphdr *)(ip6 + 1); 1062 } else 1063 #endif 1064 { 1065 ip = mtod(m, struct ip *); 1066 ip->ip_v = IPVERSION; 1067 ip->ip_hl = sizeof(struct ip) >> 2; 1068 ip->ip_len = tlen; 1069 ip->ip_id = 0; 1070 ip->ip_off = 0; 1071 ip->ip_sum = 0; 1072 ip->ip_p = IPPROTO_TCP; 1073 ip->ip_src = sc->sc_inc.inc_laddr; 1074 ip->ip_dst = sc->sc_inc.inc_faddr; 1075 ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl; /* XXX */ 1076 ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos; /* XXX */ 1077 1078 /* 1079 * See if we should do MTU discovery. Route lookups are expensive, 1080 * so we will only unset the DF bit if: 1081 * 1082 * 1) path_mtu_discovery is disabled 1083 * 2) the SCF_UNREACH flag has been set 1084 */ 1085 if (path_mtu_discovery 1086 && ((sc->sc_flags & SCF_UNREACH) == 0)) { 1087 ip->ip_off |= IP_DF; 1088 } 1089 1090 th = (struct tcphdr *)(ip + 1); 1091 } 1092 th->th_sport = sc->sc_inc.inc_lport; 1093 th->th_dport = sc->sc_inc.inc_fport; 1094 1095 th->th_seq = htonl(sc->sc_iss); 1096 th->th_ack = htonl(sc->sc_irs + 1); 1097 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 1098 th->th_x2 = 0; 1099 th->th_flags = TH_SYN|TH_ACK; 1100 th->th_win = htons(sc->sc_wnd); 1101 th->th_urp = 0; 1102 1103 /* Tack on the TCP options. */ 1104 if (optlen == 0) 1105 goto no_options; 1106 optp = (u_int8_t *)(th + 1); 1107 *optp++ = TCPOPT_MAXSEG; 1108 *optp++ = TCPOLEN_MAXSEG; 1109 *optp++ = (mssopt >> 8) & 0xff; 1110 *optp++ = mssopt & 0xff; 1111 1112 if (sc->sc_flags & SCF_WINSCALE) { 1113 *((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 | 1114 TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 | 1115 sc->sc_request_r_scale); 1116 optp += 4; 1117 } 1118 1119 if (sc->sc_flags & SCF_TIMESTAMP) { 1120 u_int32_t *lp = (u_int32_t *)(optp); 1121 1122 /* Form timestamp option as shown in appendix A of RFC 1323. */ 1123 *lp++ = htonl(TCPOPT_TSTAMP_HDR); 1124 *lp++ = htonl(ticks); 1125 *lp = htonl(sc->sc_tsrecent); 1126 optp += TCPOLEN_TSTAMP_APPA; 1127 } 1128 1129 /* 1130 * Send CC and CC.echo if we received CC from our peer. 1131 */ 1132 if (sc->sc_flags & SCF_CC) { 1133 u_int32_t *lp = (u_int32_t *)(optp); 1134 1135 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC)); 1136 *lp++ = htonl(sc->sc_cc_send); 1137 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO)); 1138 *lp = htonl(sc->sc_cc_recv); 1139 optp += TCPOLEN_CC_APPA * 2; 1140 } 1141 no_options: 1142 1143 #ifdef INET6 1144 if (sc->sc_inc.inc_isipv6) { 1145 struct route_in6 *ro6 = &sc->sc_route6; 1146 1147 th->th_sum = 0; 1148 th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen); 1149 ip6->ip6_hlim = in6_selecthlim(NULL, 1150 ro6->ro_rt ? 
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route, 0, NULL,
		    sc->sc_tp->t_inpcb);
	}
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */
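
/*
 * Illustrative sketch, not part of the original file: a round trip through
 * the cookie layers drawn above.  syncookie_generate() below packs the
 * 2-bit mss-table index and the 5-bit secret index, then XORs in the peer's
 * ISS and the top 27 bits of the MD5 output; syncookie_lookup() peels the
 * layers off in the same order.  The helper name and its arguments are
 * hypothetical, standing in for values the real code takes from the
 * syncache entry and from the MD5 computation.
 */
#if 0
static void
syncookie_roundtrip_sketch(u_int32_t peer_iss, u_int32_t md5_word,
    u_int32_t mss_index, u_int32_t secret_idx)
{
	u_int32_t cookie, data;

	/* generate: pack <mss index, secret index> and add the XOR layers */
	cookie = (mss_index << SYNCOOKIE_WNDBITS) | secret_idx;
	cookie ^= peer_iss;
	cookie ^= md5_word & ~SYNCOOKIE_WNDMASK;   /* low 5 bits are spared */

	/* lookup: (th_ack - 1) ^ (th_seq - 1) strips the peer's ISS ... */
	data = cookie ^ peer_iss;
	KASSERT((data & SYNCOOKIE_WNDMASK) == secret_idx,
	    ("secret index is readable before the MD5 layer is removed"));

	/* ... and XORing the recomputed MD5 word exposes the mss index. */
	data ^= md5_word;
	KASSERT((data >> SYNCOOKIE_WNDBITS) == mss_index,
	    ("mss index recovered from the upper bits"));
}
#endif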
1219 */ 1220 1221 static u_int32_t 1222 syncookie_generate(struct syncache *sc) 1223 { 1224 u_int32_t md5_buffer[4]; 1225 u_int32_t data; 1226 int idx, i; 1227 struct md5_add add; 1228 1229 idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK; 1230 if (tcp_secret[idx].ts_expire < ticks) { 1231 for (i = 0; i < 4; i++) 1232 tcp_secret[idx].ts_secbits[i] = arc4random(); 1233 tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT; 1234 } 1235 for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--) 1236 if (tcp_msstab[data] <= sc->sc_peer_mss) 1237 break; 1238 data = (data << SYNCOOKIE_WNDBITS) | idx; 1239 data ^= sc->sc_irs; /* peer's iss */ 1240 MD5Init(&syn_ctx); 1241 #ifdef INET6 1242 if (sc->sc_inc.inc_isipv6) { 1243 MD5Add(sc->sc_inc.inc6_laddr); 1244 MD5Add(sc->sc_inc.inc6_faddr); 1245 add.laddr = 0; 1246 add.faddr = 0; 1247 } else 1248 #endif 1249 { 1250 add.laddr = sc->sc_inc.inc_laddr.s_addr; 1251 add.faddr = sc->sc_inc.inc_faddr.s_addr; 1252 } 1253 add.lport = sc->sc_inc.inc_lport; 1254 add.fport = sc->sc_inc.inc_fport; 1255 add.secbits[0] = tcp_secret[idx].ts_secbits[0]; 1256 add.secbits[1] = tcp_secret[idx].ts_secbits[1]; 1257 add.secbits[2] = tcp_secret[idx].ts_secbits[2]; 1258 add.secbits[3] = tcp_secret[idx].ts_secbits[3]; 1259 MD5Add(add); 1260 MD5Final((u_char *)&md5_buffer, &syn_ctx); 1261 data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK); 1262 return (data); 1263 } 1264 1265 static struct syncache * 1266 syncookie_lookup(inc, th, so) 1267 struct in_conninfo *inc; 1268 struct tcphdr *th; 1269 struct socket *so; 1270 { 1271 u_int32_t md5_buffer[4]; 1272 struct syncache *sc; 1273 u_int32_t data; 1274 int wnd, idx; 1275 struct md5_add add; 1276 1277 data = (th->th_ack - 1) ^ (th->th_seq - 1); /* remove ISS */ 1278 idx = data & SYNCOOKIE_WNDMASK; 1279 if (tcp_secret[idx].ts_expire < ticks || 1280 sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks) 1281 return (NULL); 1282 MD5Init(&syn_ctx); 1283 #ifdef INET6 1284 if (inc->inc_isipv6) { 1285 MD5Add(inc->inc6_laddr); 1286 MD5Add(inc->inc6_faddr); 1287 add.laddr = 0; 1288 add.faddr = 0; 1289 } else 1290 #endif 1291 { 1292 add.laddr = inc->inc_laddr.s_addr; 1293 add.faddr = inc->inc_faddr.s_addr; 1294 } 1295 add.lport = inc->inc_lport; 1296 add.fport = inc->inc_fport; 1297 add.secbits[0] = tcp_secret[idx].ts_secbits[0]; 1298 add.secbits[1] = tcp_secret[idx].ts_secbits[1]; 1299 add.secbits[2] = tcp_secret[idx].ts_secbits[2]; 1300 add.secbits[3] = tcp_secret[idx].ts_secbits[3]; 1301 MD5Add(add); 1302 MD5Final((u_char *)&md5_buffer, &syn_ctx); 1303 data ^= md5_buffer[0]; 1304 if ((data & ~SYNCOOKIE_DATAMASK) != 0) 1305 return (NULL); 1306 data = data >> SYNCOOKIE_WNDBITS; 1307 1308 /* 1309 * This allocation is guaranteed to succeed because we 1310 * preallocate one more syncache entry than cache_limit. 1311 */ 1312 sc = zalloc(tcp_syncache.zone); 1313 1314 /* 1315 * Fill in the syncache values. 

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	/*
	 * This allocation is guaranteed to succeed because we
	 * preallocate one more syncache entry than cache_limit.
	 */
	sc = zalloc(tcp_syncache.zone);

	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}