/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *   This product includes software developed by Jeffrey M. Hsu.
 *
 * Copyright (c) 2001 Networks Associates Technologies, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/tcp_syncache.c,v 1.5.2.14 2003/02/24 04:02:27 silby Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/in_cksum.h>

#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#include <netproto/key/key.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#include <netproto/ipsec/key.h>
#define	IPSEC
#endif /*FAST_IPSEC*/
static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
static struct syncache *syncache_lookup(struct in_conninfo *,
					struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
				      struct mbuf *);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
					 struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * Four retransmits correspond to a timeout of (3 + 3 + 3 + 3 + 3 == 15)
 * seconds, or (1 + 1 + 2 + 4 + 8 == 16) seconds if RFC6298 is used; the
 * odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		4

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

static void	 syncache_timer_handler(netmsg_t);
static int	 syncache_sysctl_count(SYSCTL_HANDLER_ARGS);

struct tcp_syncache {
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
};
static struct tcp_syncache tcp_syncache;

struct syncache_timerq {
	TAILQ_HEAD(, syncache)	list;
	struct callout		timeo;
	struct netmsg_base	nm;
};

struct tcp_syncache_percpu {
	struct syncache_head	*hashbase;
	u_int			cache_count;
	struct syncache_timerq	timerq[SYNCACHE_MAXREXMTS + 1];
};

static struct tcp_syncache_percpu *tcp_syncache_percpu[MAXCPU];
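/*
 * NOTE on the per-CPU layout above (a reading aid, not new mechanism):
 * each netisr CPU owns a private hash table, entry counter and set of
 * timer queues, so no locking is needed between CPUs.  This assumes the
 * packet-hash based dispatch (see the M_HASH assertions below) steers
 * all segments of a connection to the same CPU.  One observable
 * consequence is that cache_limit is effectively a per-CPU limit, since
 * each cache_count is compared against it independently.
 */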
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_PROC(_net_inet_tcp_syncache, OID_AUTO, count, (CTLTYPE_INT | CTLFLAG_RD),
    0, 0, syncache_sysctl_count, "I", "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
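/*
 * The hash macros above simply XOR-fold the foreign address and the two
 * ports with a boot-time random secret, then mask down to the table
 * size.  Illustrative sketch of the IPv4 arithmetic with the default
 * 512 buckets (hashmask == 0x1ff):
 *
 *	idx = (secret ^ faddr ^ (faddr >> 16) ^ fport ^ lport) & 0x1ff;
 *
 * The secret makes bucket placement unpredictable to remote hosts, so
 * an attacker cannot deliberately collide entries into one bucket.
 */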
static __inline int
syncache_rto(int slot)
{
	if (tcp_low_rtobase)
		return (TCPTV_RTOBASE * tcp_syn_backoff_low[slot]);
	else
		return (TCPTV_RTOBASE * tcp_syn_backoff[slot]);
}

static __inline void
syncache_timeout(struct tcp_syncache_percpu *syncache_percpu,
		 struct syncache *sc, int slot)
{
	struct syncache_timerq *tq;
	int rto;

	KASSERT(slot <= SYNCACHE_MAXREXMTS,
	    ("syncache: invalid slot %d", slot));

	if (slot > 0) {
		/*
		 * Record the time that we spent in SYN|ACK
		 * retransmission.
		 *
		 * Needed by RFC3390 and RFC6298.
		 */
		sc->sc_rxtused += syncache_rto(slot - 1);
	}
	sc->sc_rxtslot = slot;

	rto = syncache_rto(slot);
	sc->sc_rxttime = ticks + rto;

	tq = &syncache_percpu->timerq[slot];
	TAILQ_INSERT_TAIL(&tq->list, sc, sc_timerq);
	if (!callout_active(&tq->timeo))
		callout_reset(&tq->timeo, rto, syncache_timer, &tq->nm);
}
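/*
 * NOTE on the queueing discipline: every entry appended to timerq[slot]
 * is given the same per-slot RTO, so each queue is naturally sorted by
 * sc_rxttime.  That is what lets syncache_timer_handler() below stop at
 * the first entry whose deadline has not yet been reached instead of
 * scanning the whole queue.
 */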
static void
syncache_free(struct syncache *sc)
{
	struct rtentry *rt;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (sc->sc_ipopts)
		m_free(sc->sc_ipopts);

	rt = isipv6 ? sc->sc_route6.ro_rt : sc->sc_route.ro_rt;
	if (rt != NULL) {
		/*
		 * If this is the only reference to a protocol-cloned
		 * route, remove it immediately.
		 */
		if ((rt->rt_flags & (RTF_WASCLONED | RTF_LLINFO)) ==
		    RTF_WASCLONED && rt->rt_refcnt == 1) {
			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, NULL);
		}
		RTFREE(rt);
	}
	kfree(sc, M_SYNCACHE);
}

static void
syncache_init_dispatch(netmsg_t nm)
{
	struct tcp_syncache_percpu *syncache_percpu;
	int i;

	ASSERT_NETISR_NCPUS(mycpuid);

	syncache_percpu = kmalloc(sizeof(*syncache_percpu), M_SYNCACHE,
	    M_WAITOK | M_ZERO);

	/* Allocate the hash table. */
	syncache_percpu->hashbase = kmalloc(tcp_syncache.hashsize *
	    sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		struct syncache_head *bucket;

		bucket = &syncache_percpu->hashbase[i];
		TAILQ_INIT(&bucket->sch_bucket);
		bucket->sch_length = 0;
	}

	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		struct syncache_timerq *tq =
		    &syncache_percpu->timerq[i];

		/* Initialize the timer queues. */
		TAILQ_INIT(&tq->list);
		callout_init_mp(&tq->timeo);

		netmsg_init(&tq->nm, NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, syncache_timer_handler);
		tq->nm.lmsg.u.ms_result = i;
	}

	tcp_syncache_percpu[mycpuid] = syncache_percpu;

	netisr_forwardmsg(&nm->base, mycpuid + 1);
}

void
syncache_init(void)
{
	struct netmsg_base nm;

	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = karc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		kprintf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	netmsg_init(&nm, NULL, &curthread->td_msgport, 0,
	    syncache_init_dispatch);
	netisr_domsg_global(&nm);
}
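/*
 * The power-of-2 check in syncache_init() exists because the hash
 * macros reduce the folded hash with a bitwise AND rather than a
 * modulo: hashmask = hashsize - 1 is only an all-ones mask for a
 * power-of-2 hashsize.  With any other size some buckets could never
 * be selected, which is why a bad tunable falls back to 512.
 */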
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc2;
	int i;

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (syncache_percpu->cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&syncache_percpu->timerq[i].list);
			if (sc2 != NULL)
				break;
		}
		if (sc2->sc_tp != NULL)
			sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	syncache_timeout(syncache_percpu, sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	syncache_percpu->cache_count++;
	tcpstat.tcps_sc_added++;
}

void
syncache_destroy(struct tcpcb *tp, struct tcpcb *tp_inh)
{
	struct tcp_syncache_percpu *syncache_percpu;
	int i;

	ASSERT_NETISR_NCPUS(mycpuid);

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		struct syncache_head *bucket;
		struct syncache *sc;

		bucket = &syncache_percpu->hashbase[i];
		TAILQ_FOREACH(sc, &bucket->sch_bucket, sc_hash) {
			if (sc->sc_tp == tp)
				sc->sc_tp = tp_inh;
		}
	}
}

static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{
	struct tcp_syncache_percpu *syncache_percpu;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];

	if (sch == NULL) {
		if (isipv6) {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else {
			sch = &syncache_percpu->hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	syncache_percpu->cache_count--;

	/*
	 * Cleanup
	 */
	sc->sc_tp = NULL;

	/*
	 * Remove the entry from the syncache timer/timeout queue.  Note
	 * that we do not try to stop any running timer since we do not know
	 * whether the timer's message is in-transit or not.  Since timeouts
	 * are fairly long, taking an unneeded callout does not detrimentally
	 * affect performance.
	 */
	TAILQ_REMOVE(&syncache_percpu->timerq[sc->sc_rxtslot].list, sc,
	    sc_timerq);

	syncache_free(sc);
}
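/*
 * Timer flow overview (a reading aid, not new mechanism): each timer
 * queue owns one callout and one preinitialized netmsg.  The callout
 * fires in soft interrupt context (syncache_timer() below) and merely
 * forwards the netmsg to the owning protocol thread; the actual queue
 * walk happens in syncache_timer_handler().  The MSGF_DONE test keeps
 * a message that is still in flight from being sent a second time.
 */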
/*
 * Place a timeout message on the TCP thread's message queue.
 * This routine runs in soft interrupt context.
 *
 * An invariant of this routine is that, for it to be called, the callout
 * must have been active.  Note that the callout is not deactivated until
 * after the message has been processed in syncache_timer_handler() below.
 */
static void
syncache_timer(void *p)
{
	struct netmsg_base *msg = p;

	KKASSERT(mycpuid < netisr_ncpus);

	crit_enter();
	if (msg->lmsg.ms_flags & MSGF_DONE)
		netisr_sendmsg_oncpu(msg);
	crit_exit();
}

/*
 * Service a timer message queued by timer expiration.
 * This routine runs in the TCP protocol thread.
 *
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 *
 * When we finish processing timed-out entries, we restart the timer if there
 * are any entries still on the queue and deactivate it otherwise.  Only after
 * a timer has been deactivated here can it be restarted by syncache_timeout().
 */
static void
syncache_timer_handler(netmsg_t msg)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *nsc;
	struct syncache_timerq *tq;
	int slot;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Reply ASAP. */
	crit_enter();
	netisr_replymsg(&msg->base, 0);
	crit_exit();

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];

	slot = msg->lmsg.u.ms_result;
	KASSERT(slot <= SYNCACHE_MAXREXMTS,
	    ("syncache: invalid slot %d", slot));
	tq = &syncache_percpu->timerq[slot];

	nsc = TAILQ_FIRST(&tq->list);
	while (nsc != NULL) {
		struct syncache *sc;

		if (ticks < nsc->sc_rxttime)
			break;	/* finished because timerq sorted by time */

		sc = nsc;
		if (sc->sc_tp == NULL) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    sc->sc_tp->t_inpcb->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next entry
		 * on the timer chain until it has completed.
		 */
		syncache_respond(sc, NULL);
		tcpstat.tcps_sc_retransmitted++;
		nsc = TAILQ_NEXT(sc, sc_timerq);
		TAILQ_REMOVE(&tq->list, sc, sc_timerq);
		syncache_timeout(syncache_percpu, sc, slot + 1);
	}

	if (nsc != NULL) {
		callout_reset(&tq->timeo, nsc->sc_rxttime - ticks,
		    syncache_timer, &tq->nm);
	} else {
		callout_deactivate(&tq->timeo);
	}
}
/*
 * Find an entry in the syncache.
 */
static struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct syncache *sc;
	struct syncache_head *sch;

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];
#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
	} else
#endif
	{
		sch = &syncache_percpu->hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	ASSERT_NETISR_NCPUS(mycpuid);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	ASSERT_NETISR_NCPUS(mycpuid);

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(struct in_conninfo *inc, const struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}
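/*
 * Life cycle overview (reading aid): an incoming SYN on a listen socket
 * goes through syncache_add(), which stashes an entry and emits the
 * SYN,ACK via syncache_respond().  The handshake-completing ACK arrives
 * in syncache_expand(), which uses syncache_socket() below to build the
 * full connection.  RSTs, bad ACKs and ICMP errors are fielded by
 * syncache_chkrst(), syncache_badack() and syncache_unreach() above.
 */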
/*
 * Build a new TCP socket structure from a syncache entry.
 *
 * This is called from the context of the SYN+ACK.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL, *linp;
	struct socket *so;
	struct tcpcb *tp, *ltp;
	lwkt_port_t port;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif
	struct sockaddr_in sin_faddr;
	struct sockaddr_in6 sin6_faddr;
	struct sockaddr *faddr;

	KASSERT(m->m_flags & M_HASH, ("mbuf has no hash"));

	if (isipv6) {
		faddr = (struct sockaddr *)&sin6_faddr;
		sin6_faddr.sin6_family = AF_INET6;
		sin6_faddr.sin6_len = sizeof(sin6_faddr);
		sin6_faddr.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6_faddr.sin6_port = sc->sc_inc.inc_fport;
		sin6_faddr.sin6_flowinfo = sin6_faddr.sin6_scope_id = 0;
	} else {
		faddr = (struct sockaddr *)&sin_faddr;
		sin_faddr.sin_family = AF_INET;
		sin_faddr.sin_len = sizeof(sin_faddr);
		sin_faddr.sin_addr = sc->sc_inc.inc_faddr;
		sin_faddr.sin_port = sc->sc_inc.inc_fport;
		bzero(sin_faddr.sin_zero, sizeof(sin_faddr.sin_zero));
	}

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 *
	 * Set the protocol processing port for the socket to the current
	 * port (that the connection came in on).
	 *
	 * NOTE:
	 * We don't keep a reference on the new socket, since its
	 * destruction will run in this thread (netisrN); there is no
	 * race here.
	 */
	so = sonewconn_faddr(lso, SS_ISCONNECTED, faddr,
	    FALSE /* don't ref */);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort;
	}

	/*
	 * Insert new socket into hash list.
	 */
	inp = so->so_pcb;
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
	if (isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		KASSERT(INP_ISIPV4(inp), ("not inet pcb"));
		inp->inp_laddr = sc->sc_inc.inc_laddr;
	}
	inp->inp_lport = sc->sc_inc.inc_lport;

	linp = lso->so_pcb;
	ltp = intotcpcb(linp);

	tcp_pcbport_insert(ltp, inp);

#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(linp->inp_sp, inp->inp_sp))
		kprintf("syncache_expand: could not copy policy\n");
#endif
	if (isipv6) {
		struct in6_addr laddr6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= linp->inp_flags & INP_CONTROLOPTS;
		if (linp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(linp->in6p_outputopts, M_INTWAIT);
		inp->in6p_route = sc->sc_route6;
		sc->sc_route6.ro_rt = NULL;

		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, faddr, &thread0)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		port = tcp6_addrport();
	} else {
		struct in_addr laddr;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}
		inp->inp_route = sc->sc_route;
		sc->sc_route.ro_rt = NULL;

		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, faddr, &thread0)) {
			inp->inp_laddr = laddr;
			goto abort;
		}

		inp->inp_flags |= INP_HASH;
		inp->inp_hashval = m->m_pkthdr.hash;
		port = netisr_hashport(inp->inp_hashval);
	}

	/*
	 * The current port should be in the context of the SYN+ACK and
	 * so should match the tcp address port.
	 */
	KASSERT(port == &curthread->td_msgport,
	    ("TCP PORT MISMATCH %p vs %p\n", port, &curthread->td_msgport));

	tp = intotcpcb(inp);
	TCP_STATE_CHANGE(tp, TCPS_SYN_RECEIVED);
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wnd = sc->sc_sndwnd;
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH | TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE | TF_RCVD_SCALE;
		tp->snd_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP | TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_SACK_PERMITTED)
		tp->t_flags |= TF_SACK_PERMITTED;

#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif /* TCP_SIGNATURE */

	tp->t_rxtsyn = sc->sc_rxtused;
	tcp_rmx_init(tp, sc->sc_peer_mss);

	/*
	 * Inherit some properties from the listen socket
	 */
	tp->t_keepinit = ltp->t_keepinit;
	tp->t_keepidle = ltp->t_keepidle;
	tp->t_keepintvl = ltp->t_keepintvl;
	tp->t_keepcnt = ltp->t_keepcnt;
	tp->t_maxidle = ltp->t_maxidle;

	tcp_create_timermsg(tp, port);
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	if (so != NULL)
		soabort_direct(so);
	return (NULL);
}
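/*
 * A note on the ACK check in syncache_expand() below: the completing
 * ACK must acknowledge exactly our SYN,ACK, i.e. th_ack == sc_iss + 1.
 * For example (illustrative numbers), if we sent ISS 0x1000, only a
 * segment with th_ack == 0x1001 completes the handshake; any other
 * value fails the match, so a stray or forged ACK cannot steal the
 * entry.
 */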
/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcphdr *th, struct socket **sop,
		struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	ASSERT_NETISR_NCPUS(mycpuid);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST | TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else {
		tcpstat.tcps_sc_completed++;
	}
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}
/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
	     struct socket *so, struct mbuf *m)
{
	struct tcp_syncache_percpu *syncache_percpu;
	struct tcpcb *tp;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	int win;

	ASSERT_NETISR_NCPUS(mycpuid);
	KASSERT(m->m_flags & M_HASH, ("mbuf has no hash"));

	syncache_percpu = tcp_syncache_percpu[mycpu->gd_cpuid];
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * The syncache should be re-initialized with the contents
	 * of the new SYN which may have different options.
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		KASSERT(sc->sc_flags & SCF_HASH, ("syncache has no hash"));
		KASSERT(sc->sc_hashval == m->m_pkthdr.hash,
		    ("syncache/mbuf hash mismatches"));

		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;

		/* Just update the TOF_SACK_PERMITTED for now. */
		if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
			sc->sc_flags |= SCF_SACK_PERMITTED;
		else
			sc->sc_flags &= ~SCF_SACK_PERMITTED;

		/* Update initial send window */
		sc->sc_sndwnd = th->th_win;

		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
		if (syncache_respond(sc, m) == 0) {
			TAILQ_REMOVE(
			    &syncache_percpu->timerq[sc->sc_rxtslot].list,
			    sc, sc_timerq);
			syncache_timeout(syncache_percpu, sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		return (1);
	}

	/*
	 * Fill in the syncache values.
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK | M_ZERO);
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = tp;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = SCF_HASH;
	sc->sc_hashval = m->m_pkthdr.hash;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = karc4random();

	/* Initial receive window: clip ssb_space to [0 .. TCP_MAXWIN] */
	win = ssb_space(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = TCP_MIN_WINSHIFT;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.ssb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_sack && (to->to_flags & TOF_SACK_PERMITTED))
		sc->sc_flags |= SCF_SACK_PERMITTED;
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags |= SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif /* TCP_SIGNATURE */
	sc->sc_sndwnd = th->th_win;

	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	return (1);
}
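/*
 * A note on the MSS advertised by syncache_respond() below: it is
 * simply the outgoing interface MTU minus the fixed IP and TCP header
 * sizes.  Worked example (illustrative): a 1500-byte Ethernet MTU
 * yields an MSS option of 1460 for IPv4 (1500 - 20 - 20) and 1440 for
 * IPv6 (1500 - 40 - 20); without a route we fall back to tcp_mssdflt
 * or tcp_v6mssdflt.
 */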
static int
syncache_respond(struct syncache *sc, struct mbuf *m)
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct rtentry *rt;
	struct tcphdr *th;
	struct ip6_hdr *ip6 = NULL;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (isipv6) {
		rt = tcp_rtlookup6(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
		else
			mssopt = tcp_v6mssdflt;
		hlen = sizeof(struct ip6_hdr);
	} else {
		rt = tcp_rtlookup(&sc->sc_inc);
		if (rt != NULL)
			mssopt = rt->rt_ifp->if_mtu -
			    (sizeof(struct ip) + sizeof(struct tcphdr));
		else
			mssopt = tcp_mssdflt;
		hlen = sizeof(struct ip);
	}

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_SACK_PERMITTED) ?
			TCPOLEN_SACK_PERMITTED_ALIGNED : 0);
#ifdef TCP_SIGNATURE
		optlen += ((sc->sc_flags & SCF_SIGNATURE) ?
		    (TCPOLEN_SIGNATURE + 2) : 0);
#endif /* TCP_SIGNATURE */
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	if (tcp_prio_synack)
		m->m_flags |= M_PRIO;

	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else {
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery &&
		    (sc->sc_flags & SCF_UNREACH) == 0)
			ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN | TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen == 0)
		goto no_options;
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

#ifdef TCP_SIGNATURE
	/*
	 * Handle TCP-MD5 passive opener response.
	 */
	if (sc->sc_flags & SCF_SIGNATURE) {
		u_int8_t *bp = optp;
		int i;

		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		tcpsignature_compute(m, 0, optlen,
		    optp + 2, IPSEC_DIR_OUTBOUND);
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optp += TCPOLEN_SIGNATURE + 2;
	}
#endif /* TCP_SIGNATURE */

	if (sc->sc_flags & SCF_SACK_PERMITTED) {
		*((u_int32_t *)optp) = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
		optp += TCPOLEN_SACK_PERMITTED_ALIGNED;
	}

no_options:
	if (isipv6) {
		struct route_in6 *ro6 = &sc->sc_route6;

		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL,
		    ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
		error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
		    sc->sc_tp->t_inpcb);
	} else {
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr) + optlen;
		KASSERT(sc->sc_flags & SCF_HASH, ("syncache has no hash"));
		m_sethash(m, sc->sc_hashval);
		error = ip_output(m, sc->sc_ipopts, &sc->sc_route,
		    IP_DEBUGROUTE, NULL, sc->sc_tp->t_inpcb);
	}
	return (error);
}
/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
	(hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif
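/*
 * Worked example of the cookie round trip (illustrative values): say
 * the 5-bit time window index is 7 and the peer MSS falls in msstab
 * slot 2 (1460).  syncookie_generate() packs data = (2 << 5) | 7, XORs
 * in the peer's ISS and the top 27 bits of the MD5 output, and the
 * result becomes our ISS.  On the returning ACK, syncookie_lookup()
 * recovers data = (th_ack - 1) ^ (th_seq - 1), since th_ack - 1 is the
 * cookie and th_seq - 1 is the peer's ISS; XORing the recomputed MD5
 * back out must leave only bits inside SYNCOOKIE_DATAMASK, or the
 * cookie is rejected.  The CTASSERT above guarantees struct md5_add
 * has no padding, so both sides hash exactly the same 28 bytes.
 */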
/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;
#ifdef INET6
	const boolean_t isipv6 = sc->sc_inc.inc_isipv6;
#else
	const boolean_t isipv6 = FALSE;
#endif

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = karc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = NELEM(tcp_msstab) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
	if (isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else {
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}
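/*
 * The lookup side below mirrors syncookie_generate(): it recomputes the
 * same MD5 digest from the connection endpoints and the per-window
 * secret, so a cookie only validates while its secret is still alive
 * (ts_expire) and while the listen socket has seen a recent syncache
 * overflow (ts_recent, stamped by syncache_insert() on eviction, plus
 * SYNCOOKIE_TIMEOUT).  Anything that XORs out to stray bits above
 * SYNCOOKIE_DATAMASK is discarded as forged or expired.
 */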
static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct tcphdr *th, struct socket *so)
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if (data & ~SYNCOOKIE_DATAMASK)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc = kmalloc(sizeof(struct syncache), M_SYNCACHE, M_WAITOK | M_ZERO);
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		sc->sc_route6.ro_rt = NULL;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_route.ro_rt = NULL;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = ssb_space(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}

static int
syncache_sysctl_count(SYSCTL_HANDLER_ARGS)
{
	u_int count = 0;
	int cpu;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		count += tcp_syncache_percpu[cpu]->cache_count;
	return sysctl_handle_int(oidp, &count, 0, req);
}