1 /* $OpenBSD: ip_ipsp.c,v 1.214 2015/05/23 12:38:53 markus Exp $ */ 2 /* 3 * The authors of this code are John Ioannidis (ji@tla.org), 4 * Angelos D. Keromytis (kermit@csd.uch.gr), 5 * Niels Provos (provos@physnet.uni-hamburg.de) and 6 * Niklas Hallqvist (niklas@appli.se). 7 * 8 * The original version of this code was written by John Ioannidis 9 * for BSD/OS in Athens, Greece, in November 1995. 10 * 11 * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, 12 * by Angelos D. Keromytis. 13 * 14 * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis 15 * and Niels Provos. 16 * 17 * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist. 18 * 19 * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, 20 * Angelos D. Keromytis and Niels Provos. 21 * Copyright (c) 1999 Niklas Hallqvist. 22 * Copyright (c) 2001, Angelos D. Keromytis. 23 * 24 * Permission to use, copy, and modify this software with or without fee 25 * is hereby granted, provided that this entire notice is included in 26 * all copies of any software which is or includes a copy or 27 * modification of this software. 28 * You may use this code under the GNU public license if you so wish. Please 29 * contribute changes back to the authors under this freer than GPL license 30 * so that we may further the use of strong encryption without limitations to 31 * all. 32 * 33 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 34 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 35 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 36 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 37 * PURPOSE. 
 */

#include "pf.h"
#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>

#if NPF > 0
#include <net/pfvar.h>
#endif

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif

#include <netinet/ip_ipsp.h>
#include <net/pfkeyv2.h>

#ifdef DDB
#include <ddb/db_output.h>
void tdb_hashstats(void);
#endif

/* Debugging printf: only produces output when "encdebug" is set. */
#ifdef ENCDEBUG
#define DPRINTF(x)	if (encdebug) printf x
#else
#define DPRINTF(x)
#endif

/* Local prototypes (definitions below). */
void tdb_rehash(void);
void tdb_timeout(void *v);
void tdb_firstuse(void *v);
void tdb_soft_timeout(void *v);
void tdb_soft_firstuse(void *v);
int tdb_hash(u_int, u_int32_t, union sockaddr_union *, u_int8_t);

int ipsec_in_use = 0;
u_int64_t ipsec_last_added = 0;

struct ipsec_policy_head ipsec_policy_head =
    TAILQ_HEAD_INITIALIZER(ipsec_policy_head);
struct ipsec_acquire_head ipsec_acquire_head =
    TAILQ_HEAD_INITIALIZER(ipsec_acquire_head);

u_int32_t ipsec_ids_next_flow = 1;	/* may not be zero */
int ipsec_ids_idle = 100;		/* keep free ids for 100s */
struct ipsec_ids_tree ipsec_ids_tree;
struct ipsec_ids_flows ipsec_ids_flows;

/* Red-black trees interning ipsec_ids: one keyed by ID, one by flow number. */
void ipsp_ids_timeout(void *);
static int ipsp_ids_cmp(struct ipsec_ids *, struct ipsec_ids *);
static int ipsp_ids_flow_cmp(struct ipsec_ids *, struct ipsec_ids *);
RB_PROTOTYPE(ipsec_ids_tree, ipsec_ids, id_node_flow, ipsp_ids_cmp);
RB_PROTOTYPE(ipsec_ids_flows, ipsec_ids, id_node_id, ipsp_ids_flow_cmp);
RB_GENERATE(ipsec_ids_tree, ipsec_ids, id_node_flow, ipsp_ids_cmp);
RB_GENERATE(ipsec_ids_flows, ipsec_ids, id_node_id, ipsp_ids_flow_cmp);

/*
 * This is the proper place to define the various
encapsulation transforms.
 */

struct xformsw xformsw[] = {
#ifdef IPSEC
	{ XF_IP4,	0,		"IPv4 Simple Encapsulation",
	  ipe4_attach,	ipe4_init,	ipe4_zeroize,
	  (int (*)(struct mbuf *, struct tdb *, int, int))ipe4_input,
	  ipip_output, },
	{ XF_AH,	XFT_AUTH,	"IPsec AH",
	  ah_attach,	ah_init,	ah_zeroize,
	  ah_input,	ah_output, },
	{ XF_ESP,	XFT_CONF|XFT_AUTH,	"IPsec ESP",
	  esp_attach,	esp_init,	esp_zeroize,
	  esp_input,	esp_output, },
	{ XF_IPCOMP,	XFT_COMP,	"IPcomp",
	  ipcomp_attach, ipcomp_init,	ipcomp_zeroize,
	  ipcomp_input,	ipcomp_output, },
#endif /* IPSEC */
#ifdef TCP_SIGNATURE
	{ XF_TCPSIGNATURE, XFT_AUTH, "TCP MD5 Signature Option, RFC 2385",
	  tcp_signature_tdb_attach,	tcp_signature_tdb_init,
	  tcp_signature_tdb_zeroize,	tcp_signature_tdb_input,
	  tcp_signature_tdb_output, }
#endif /* TCP_SIGNATURE */
};

/* Sentinel pointing one past the last entry of xformsw[]. */
struct xformsw *xformswNXFORMSW = &xformsw[nitems(xformsw)];

#define TDB_HASHSIZE_INIT	32

/*
 * TDB hash state: the SipHash key plus three chained-bucket tables,
 * all lazily allocated by puttdb() and grown by tdb_rehash().
 */
static SIPHASH_KEY tdbkey;
static struct tdb **tdbh = NULL;	/* keyed on rdomain/SPI/dst/proto */
static struct tdb **tdbdst = NULL;	/* keyed on rdomain/dst/proto */
static struct tdb **tdbsrc = NULL;	/* keyed on rdomain/src/proto */
static u_int tdb_hashmask = TDB_HASHSIZE_INIT - 1;
static int tdb_count;			/* TDBs currently in the tables */

/*
 * Our hashing function needs to stir things with a non-zero random multiplier
 * so we cannot be DoS-attacked via choosing of the data to hash.
 * Returns a bucket index in [0, tdb_hashmask].
 */
int
tdb_hash(u_int rdomain, u_int32_t spi, union sockaddr_union *dst,
    u_int8_t proto)
{
	SIPHASH_CTX ctx;

	SipHash24_Init(&ctx, &tdbkey);
	SipHash24_Update(&ctx, &rdomain, sizeof(rdomain));
	SipHash24_Update(&ctx, &spi, sizeof(spi));
	SipHash24_Update(&ctx, &proto, sizeof(proto));
	SipHash24_Update(&ctx, dst, SA_LEN(&dst->sa));

	return (SipHash24_End(&ctx) & tdb_hashmask);
}

/*
 * Reserve an SPI; the SA is not valid yet though.  We use 0 as
 * an error return value.
 */
u_int32_t
reserve_spi(u_int rdomain, u_int32_t sspi, u_int32_t tspi,
    union sockaddr_union *src, union sockaddr_union *dst,
    u_int8_t sproto, int *errval)
{
	struct tdb *tdbp, *exists;
	u_int32_t spi;
	int nums, s;

	/* Don't accept ranges only encompassing reserved SPIs. */
	if (sproto != IPPROTO_IPCOMP &&
	    (tspi < sspi || tspi <= SPI_RESERVED_MAX)) {
		(*errval) = EINVAL;
		return 0;
	}
	if (sproto == IPPROTO_IPCOMP && (tspi < sspi ||
	    tspi <= CPI_RESERVED_MAX ||
	    tspi >= CPI_PRIVATE_MIN)) {
		(*errval) = EINVAL;
		return 0;
	}

	/* Limit the range to not include reserved areas. */
	if (sspi <= SPI_RESERVED_MAX)
		sspi = SPI_RESERVED_MAX + 1;

	/* For IPCOMP the CPI is only 16 bits long, what a good idea.... */

	if (sproto == IPPROTO_IPCOMP) {
		u_int32_t t;
		if (sspi >= 0x10000)
			sspi = 0xffff;
		if (tspi >= 0x10000)
			tspi = 0xffff;
		/* Keep the range ordered after the clamping above. */
		if (sspi > tspi) {
			t = sspi; sspi = tspi; tspi = t;
		}
	}

	if (sspi == tspi)   /* Asking for a specific SPI. */
		nums = 1;
	else
		nums = 100;  /* Arbitrarily chosen */

	/* allocate ahead of time to avoid potential sleeping race in loop */
	tdbp = tdb_alloc(rdomain);

	while (nums--) {
		if (sspi == tspi)  /* Specific SPI asked. */
			spi = tspi;
		else    /* Range specified */
			spi = sspi + arc4random_uniform(tspi - sspi);

		/* Don't allocate reserved SPIs.  */
		if (spi >= SPI_RESERVED_MIN && spi <= SPI_RESERVED_MAX)
			continue;
		else
			spi = htonl(spi);	/* stored in network order */

		/* Check whether we're using this SPI already. */
		s = splsoftnet();
		exists = gettdb(rdomain, spi, dst, sproto);
		splx(s);

		if (exists)
			continue;

		tdbp->tdb_spi = spi;
		bcopy(&dst->sa, &tdbp->tdb_dst.sa, SA_LEN(&dst->sa));
		bcopy(&src->sa, &tdbp->tdb_src.sa, SA_LEN(&src->sa));
		tdbp->tdb_sproto = sproto;
		tdbp->tdb_flags |= TDBF_INVALID; /* Mark SA invalid for now. */
		tdbp->tdb_satype = SADB_SATYPE_UNSPEC;
		puttdb(tdbp);

		/* Setup a "silent" expiration (since TDBF_INVALID's set). */
		if (ipsec_keep_invalid > 0) {
			tdbp->tdb_flags |= TDBF_TIMER;
			tdbp->tdb_exp_timeout = ipsec_keep_invalid;
			timeout_add_sec(&tdbp->tdb_timer_tmo,
			    ipsec_keep_invalid);
		}

		return spi;
	}

	/* All attempts collided; release the pre-allocated TDB. */
	(*errval) = EEXIST;
	tdb_free(tdbp);
	return 0;
}

/*
 * An IPSP SAID is really the concatenation of the SPI found in the
 * packet, the destination address of the packet and the IPsec protocol.
 * When we receive an IPSP packet, we need to look up its tunnel descriptor
 * block, based on the SPI in the packet and the destination address (which
 * is really one of our addresses if we received the packet!
 *
 * Caller is responsible for setting at least splsoftnet().
 */
struct tdb *
gettdb(u_int rdomain, u_int32_t spi, union sockaddr_union *dst, u_int8_t proto)
{
	u_int32_t hashval;
	struct tdb *tdbp;

	if (tdbh == NULL)
		return (struct tdb *) NULL;

	hashval = tdb_hash(rdomain, spi, dst, proto);

	for (tdbp = tdbh[hashval]; tdbp != NULL; tdbp = tdbp->tdb_hnext)
		if ((tdbp->tdb_spi == spi) && (tdbp->tdb_sproto == proto) &&
		    (tdbp->tdb_rdomain == rdomain) &&
		    !memcmp(&tdbp->tdb_dst, dst, SA_LEN(&dst->sa)))
			break;

	return tdbp;
}

/*
 * Same as gettdb() but compare SRC as well, so we
 * use the tdbsrc[] hash table.  Setting spi to 0
 * matches all SPIs.
 */
struct tdb *
gettdbbysrcdst(u_int rdomain, u_int32_t spi, union sockaddr_union *src,
    union sockaddr_union *dst, u_int8_t proto)
{
	u_int32_t hashval;
	struct tdb *tdbp;
	union sockaddr_union su_null;

	if (tdbsrc == NULL)
		return (struct tdb *) NULL;

	hashval = tdb_hash(rdomain, 0, src, proto);

	/* First pass: match the given source address exactly. */
	for (tdbp = tdbsrc[hashval]; tdbp != NULL; tdbp = tdbp->tdb_snext)
		if (tdbp->tdb_sproto == proto &&
		    (spi == 0 || tdbp->tdb_spi == spi) &&
		    (tdbp->tdb_rdomain == rdomain) &&
		    ((tdbp->tdb_flags & TDBF_INVALID) == 0) &&
		    (tdbp->tdb_dst.sa.sa_family == AF_UNSPEC ||
		    !memcmp(&tdbp->tdb_dst, dst, SA_LEN(&dst->sa))) &&
		    !memcmp(&tdbp->tdb_src, src, SA_LEN(&src->sa)))
			break;

	if (tdbp != NULL)
		return (tdbp);

	/*
	 * Second pass: no exact match found; hash a zeroed address and
	 * accept TDBs whose source is the wildcard AF_UNSPEC.
	 */
	memset(&su_null, 0, sizeof(su_null));
	su_null.sa.sa_len = sizeof(struct sockaddr);
	hashval = tdb_hash(rdomain, 0, &su_null, proto);

	for (tdbp = tdbsrc[hashval]; tdbp != NULL; tdbp = tdbp->tdb_snext)
		if (tdbp->tdb_sproto == proto &&
		    (spi == 0 || tdbp->tdb_spi == spi) &&
		    (tdbp->tdb_rdomain == rdomain) &&
		    ((tdbp->tdb_flags & TDBF_INVALID) == 0) &&
		    (tdbp->tdb_dst.sa.sa_family == AF_UNSPEC ||
		    !memcmp(&tdbp->tdb_dst, dst, SA_LEN(&dst->sa))) &&
		    tdbp->tdb_src.sa.sa_family == AF_UNSPEC)
			break;

	return (tdbp);
}

/*
 * Check that IDs match.  Return true if so.  The t* range of
 * arguments contains information from TDBs; the p* range of
 * arguments contains information from policies or already
 * established TDBs.
 */
int
ipsp_aux_match(struct tdb *tdb,
    struct ipsec_ids *ids,
    struct sockaddr_encap *pfilter,
    struct sockaddr_encap *pfiltermask)
{
	/* NULL ids means the caller does not constrain IDs. */
	if (ids != NULL)
		if (tdb->tdb_ids == NULL ||
		    !ipsp_ids_match(tdb->tdb_ids, ids))
			return 0;

	/* Check for filter matches. */
	if (pfilter != NULL && pfiltermask != NULL &&
	    tdb->tdb_filter.sen_type) {
		/*
		 * XXX We should really be doing a subnet-check (see
		 * whether the TDB-associated filter is a subset
		 * of the policy's.  For now, an exact match will solve
		 * most problems (all this will do is make every
		 * policy get its own SAs).
		 */
		if (memcmp(&tdb->tdb_filter, pfilter,
		    sizeof(struct sockaddr_encap)) ||
		    memcmp(&tdb->tdb_filtermask, pfiltermask,
		    sizeof(struct sockaddr_encap)))
			return 0;
	}

	return 1;
}

/*
 * Get an SA given the remote address, the security protocol type, and
 * the desired IDs.
 */
struct tdb *
gettdbbydst(u_int rdomain, union sockaddr_union *dst, u_int8_t sproto,
    struct ipsec_ids *ids,
    struct sockaddr_encap *filter, struct sockaddr_encap *filtermask)
{
	u_int32_t hashval;
	struct tdb *tdbp;

	if (tdbdst == NULL)
		return (struct tdb *) NULL;

	hashval = tdb_hash(rdomain, 0, dst, sproto);

	for (tdbp = tdbdst[hashval]; tdbp != NULL; tdbp = tdbp->tdb_dnext)
		if ((tdbp->tdb_sproto == sproto) &&
		    (tdbp->tdb_rdomain == rdomain) &&
		    ((tdbp->tdb_flags & TDBF_INVALID) == 0) &&
		    (!memcmp(&tdbp->tdb_dst, dst, SA_LEN(&dst->sa)))) {
			/* Do IDs match ? */
			if (!ipsp_aux_match(tdbp, ids, filter, filtermask))
				continue;
			break;
		}

	return tdbp;
}

/*
 * Get an SA given the source address, the security protocol type, and
 * the desired IDs.
410 */ 411 struct tdb * 412 gettdbbysrc(u_int rdomain, union sockaddr_union *src, u_int8_t sproto, 413 struct ipsec_ids *ids, 414 struct sockaddr_encap *filter, struct sockaddr_encap *filtermask) 415 { 416 u_int32_t hashval; 417 struct tdb *tdbp; 418 419 if (tdbsrc == NULL) 420 return (struct tdb *) NULL; 421 422 hashval = tdb_hash(rdomain, 0, src, sproto); 423 424 for (tdbp = tdbsrc[hashval]; tdbp != NULL; tdbp = tdbp->tdb_snext) 425 if ((tdbp->tdb_sproto == sproto) && 426 (tdbp->tdb_rdomain == rdomain) && 427 ((tdbp->tdb_flags & TDBF_INVALID) == 0) && 428 (!memcmp(&tdbp->tdb_src, src, SA_LEN(&src->sa)))) { 429 /* Check whether IDs match */ 430 if (!ipsp_aux_match(tdbp, ids, filter, 431 filtermask)) 432 continue; 433 break; 434 } 435 436 return tdbp; 437 } 438 439 #if DDB 440 441 #define NBUCKETS 16 442 void 443 tdb_hashstats(void) 444 { 445 int i, cnt, buckets[NBUCKETS]; 446 struct tdb *tdbp; 447 448 if (tdbh == NULL) { 449 db_printf("no tdb hash table\n"); 450 return; 451 } 452 453 memset(buckets, 0, sizeof(buckets)); 454 for (i = 0; i <= tdb_hashmask; i++) { 455 cnt = 0; 456 for (tdbp = tdbh[i]; cnt < NBUCKETS - 1 && tdbp != NULL; 457 tdbp = tdbp->tdb_hnext) 458 cnt++; 459 buckets[cnt]++; 460 } 461 462 db_printf("tdb cnt\t\tbucket cnt\n"); 463 for (i = 0; i < NBUCKETS; i++) 464 if (buckets[i] > 0) 465 db_printf("%d%s\t\t%d\n", i, i == NBUCKETS - 1 ? 466 "+" : "", buckets[i]); 467 } 468 #endif /* DDB */ 469 470 /* 471 * Caller is responsible for setting at least splsoftnet(). 
472 */ 473 int 474 tdb_walk(u_int rdomain, int (*walker)(struct tdb *, void *, int), void *arg) 475 { 476 int i, rval = 0; 477 struct tdb *tdbp, *next; 478 479 if (tdbh == NULL) 480 return ENOENT; 481 482 for (i = 0; i <= tdb_hashmask; i++) 483 for (tdbp = tdbh[i]; rval == 0 && tdbp != NULL; tdbp = next) { 484 next = tdbp->tdb_hnext; 485 486 if (rdomain != tdbp->tdb_rdomain) 487 continue; 488 489 if (i == tdb_hashmask && next == NULL) 490 rval = walker(tdbp, (void *)arg, 1); 491 else 492 rval = walker(tdbp, (void *)arg, 0); 493 } 494 495 return rval; 496 } 497 498 /* 499 * Called at splsoftclock(). 500 */ 501 void 502 tdb_timeout(void *v) 503 { 504 struct tdb *tdb = v; 505 506 if (!(tdb->tdb_flags & TDBF_TIMER)) 507 return; 508 509 /* If it's an "invalid" TDB do a silent expiration. */ 510 if (!(tdb->tdb_flags & TDBF_INVALID)) 511 pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_HARD); 512 tdb_delete(tdb); 513 } 514 515 void 516 tdb_firstuse(void *v) 517 { 518 struct tdb *tdb = v; 519 520 if (!(tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)) 521 return; 522 523 /* If the TDB hasn't been used, don't renew it. */ 524 if (tdb->tdb_first_use != 0) 525 pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_HARD); 526 tdb_delete(tdb); 527 } 528 529 void 530 tdb_soft_timeout(void *v) 531 { 532 struct tdb *tdb = v; 533 534 if (!(tdb->tdb_flags & TDBF_SOFT_TIMER)) 535 return; 536 537 /* Soft expirations. */ 538 pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_SOFT); 539 tdb->tdb_flags &= ~TDBF_SOFT_TIMER; 540 } 541 542 void 543 tdb_soft_firstuse(void *v) 544 { 545 struct tdb *tdb = v; 546 547 if (!(tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)) 548 return; 549 550 /* If the TDB hasn't been used, don't renew it. */ 551 if (tdb->tdb_first_use != 0) 552 pfkeyv2_expire(tdb, SADB_EXT_LIFETIME_SOFT); 553 tdb->tdb_flags &= ~TDBF_SOFT_FIRSTUSE; 554 } 555 556 /* 557 * Caller is responsible for splsoftnet(). 
 */

/*
 * Double the bucket count of all three hash tables, pick a fresh SipHash
 * key, and re-chain every TDB.  M_WAITOK allocation: may sleep.
 */
void
tdb_rehash(void)
{
	struct tdb **new_tdbh, **new_tdbdst, **new_srcaddr, *tdbp, *tdbnp;
	u_int i, old_hashmask = tdb_hashmask;
	u_int32_t hashval;

	tdb_hashmask = (tdb_hashmask << 1) | 1;

	/* New key: every hash value changes, not just the mask width. */
	arc4random_buf(&tdbkey, sizeof(tdbkey));
	new_tdbh = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *), M_TDB,
	    M_WAITOK | M_ZERO);
	new_tdbdst = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *), M_TDB,
	    M_WAITOK | M_ZERO);
	new_srcaddr = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *), M_TDB,
	    M_WAITOK | M_ZERO);

	for (i = 0; i <= old_hashmask; i++) {
		/* Move the SPI/dst/proto chain. */
		for (tdbp = tdbh[i]; tdbp != NULL; tdbp = tdbnp) {
			tdbnp = tdbp->tdb_hnext;
			hashval = tdb_hash(tdbp->tdb_rdomain,
			    tdbp->tdb_spi, &tdbp->tdb_dst,
			    tdbp->tdb_sproto);
			tdbp->tdb_hnext = new_tdbh[hashval];
			new_tdbh[hashval] = tdbp;
		}

		/* Move the dst chain (hashed with SPI 0). */
		for (tdbp = tdbdst[i]; tdbp != NULL; tdbp = tdbnp) {
			tdbnp = tdbp->tdb_dnext;
			hashval = tdb_hash(tdbp->tdb_rdomain,
			    0, &tdbp->tdb_dst,
			    tdbp->tdb_sproto);
			tdbp->tdb_dnext = new_tdbdst[hashval];
			new_tdbdst[hashval] = tdbp;
		}

		/* Move the src chain (hashed with SPI 0). */
		for (tdbp = tdbsrc[i]; tdbp != NULL; tdbp = tdbnp) {
			tdbnp = tdbp->tdb_snext;
			hashval = tdb_hash(tdbp->tdb_rdomain,
			    0, &tdbp->tdb_src,
			    tdbp->tdb_sproto);
			tdbp->tdb_snext = new_srcaddr[hashval];
			new_srcaddr[hashval] = tdbp;
		}
	}

	free(tdbh, M_TDB, 0);
	tdbh = new_tdbh;

	free(tdbdst, M_TDB, 0);
	tdbdst = new_tdbdst;

	free(tdbsrc, M_TDB, 0);
	tdbsrc = new_srcaddr;
}

/*
 * Add TDB in the hash table.
 */
void
puttdb(struct tdb *tdbp)
{
	u_int32_t hashval;
	int s = splsoftnet();

	/* Lazily create the three hash tables on first insertion. */
	if (tdbh == NULL) {
		arc4random_buf(&tdbkey, sizeof(tdbkey));
		tdbh = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *),
		    M_TDB, M_WAITOK | M_ZERO);
		tdbdst = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *),
		    M_TDB, M_WAITOK | M_ZERO);
		tdbsrc = mallocarray(tdb_hashmask + 1, sizeof(struct tdb *),
		    M_TDB, M_WAITOK | M_ZERO);
	}

	hashval = tdb_hash(tdbp->tdb_rdomain, tdbp->tdb_spi,
	    &tdbp->tdb_dst, tdbp->tdb_sproto);

	/*
	 * Rehash if this tdb would cause a bucket to have more than
	 * two items and if the number of tdbs exceed 10% of the
	 * bucket count.  This number is arbitratily chosen and is
	 * just a measure to not keep rehashing when adding and
	 * removing tdbs which happens to always end up in the same
	 * bucket, which is not uncommon when doing manual keying.
	 */
	if (tdbh[hashval] != NULL && tdbh[hashval]->tdb_hnext != NULL &&
	    tdb_count * 10 > tdb_hashmask + 1) {
		tdb_rehash();
		/* Key and mask changed: recompute the bucket. */
		hashval = tdb_hash(tdbp->tdb_rdomain, tdbp->tdb_spi,
		    &tdbp->tdb_dst, tdbp->tdb_sproto);
	}

	/* Prepend to the SPI/dst/proto chain. */
	tdbp->tdb_hnext = tdbh[hashval];
	tdbh[hashval] = tdbp;

	/* Prepend to the dst chain. */
	hashval = tdb_hash(tdbp->tdb_rdomain, 0, &tdbp->tdb_dst,
	    tdbp->tdb_sproto);
	tdbp->tdb_dnext = tdbdst[hashval];
	tdbdst[hashval] = tdbp;

	/* Prepend to the src chain. */
	hashval = tdb_hash(tdbp->tdb_rdomain, 0, &tdbp->tdb_src,
	    tdbp->tdb_sproto);
	tdbp->tdb_snext = tdbsrc[hashval];
	tdbsrc[hashval] = tdbp;

	tdb_count++;

	ipsec_last_added = time_second;

	splx(s);
}

/*
 * Caller is responsible to set at least splsoftnet().
 */
void
tdb_delete(struct tdb *tdbp)
{
	struct tdb *tdbpp;
	u_int32_t hashval;
	int s;

	if (tdbh == NULL)
		return;

	s = splsoftnet();

	/* Unlink from the SPI/dst/proto chain. */
	hashval = tdb_hash(tdbp->tdb_rdomain, tdbp->tdb_spi,
	    &tdbp->tdb_dst, tdbp->tdb_sproto);

	if (tdbh[hashval] == tdbp) {
		tdbh[hashval] = tdbp->tdb_hnext;
	} else {
		for (tdbpp = tdbh[hashval]; tdbpp != NULL;
		    tdbpp = tdbpp->tdb_hnext) {
			if (tdbpp->tdb_hnext == tdbp) {
				tdbpp->tdb_hnext = tdbp->tdb_hnext;
				break;
			}
		}
	}

	tdbp->tdb_hnext = NULL;

	/* Unlink from the dst chain. */
	hashval = tdb_hash(tdbp->tdb_rdomain, 0, &tdbp->tdb_dst,
	    tdbp->tdb_sproto);

	if (tdbdst[hashval] == tdbp) {
		tdbdst[hashval] = tdbp->tdb_dnext;
	} else {
		for (tdbpp = tdbdst[hashval]; tdbpp != NULL;
		    tdbpp = tdbpp->tdb_dnext) {
			if (tdbpp->tdb_dnext == tdbp) {
				tdbpp->tdb_dnext = tdbp->tdb_dnext;
				break;
			}
		}
	}

	tdbp->tdb_dnext = NULL;

	/* Unlink from the src chain. */
	hashval = tdb_hash(tdbp->tdb_rdomain, 0, &tdbp->tdb_src,
	    tdbp->tdb_sproto);

	if (tdbsrc[hashval] == tdbp) {
		tdbsrc[hashval] = tdbp->tdb_snext;
	}
	else {
		for (tdbpp = tdbsrc[hashval]; tdbpp != NULL;
		    tdbpp = tdbpp->tdb_snext) {
			if (tdbpp->tdb_snext == tdbp) {
				tdbpp->tdb_snext = tdbp->tdb_snext;
				break;
			}
		}
	}

	tdbp->tdb_snext = NULL;
	tdb_free(tdbp);
	tdb_count--;

	splx(s);
}

/*
 * Allocate a TDB and initialize a few basic fields.
 * M_WAITOK allocation: may sleep; never returns NULL.
 */
struct tdb *
tdb_alloc(u_int rdomain)
{
	struct tdb *tdbp;

	tdbp = malloc(sizeof(*tdbp), M_TDB, M_WAITOK | M_ZERO);

	TAILQ_INIT(&tdbp->tdb_policy_head);

	/* Record establishment time. */
	tdbp->tdb_established = time_second;

	/* Save routing domain */
	tdbp->tdb_rdomain = rdomain;

	/* Initialize timeouts. */
	timeout_set(&tdbp->tdb_timer_tmo, tdb_timeout, tdbp);
	timeout_set(&tdbp->tdb_first_tmo, tdb_firstuse, tdbp);
	timeout_set(&tdbp->tdb_stimer_tmo, tdb_soft_timeout, tdbp);
	timeout_set(&tdbp->tdb_sfirst_tmo, tdb_soft_firstuse, tdbp);

	return tdbp;
}

/* Release a TDB and every reference it holds. */
void
tdb_free(struct tdb *tdbp)
{
	struct ipsec_policy *ipo;

	/* Let the transform zeroize/release its key material first. */
	if (tdbp->tdb_xform) {
		(*(tdbp->tdb_xform->xf_zeroize))(tdbp);
		tdbp->tdb_xform = NULL;
	}

#if NPFSYNC > 0
	/* Cleanup pfsync references */
	pfsync_delete_tdb(tdbp);
#endif

	/* Cleanup SPD references. */
	for (ipo = TAILQ_FIRST(&tdbp->tdb_policy_head); ipo;
	    ipo = TAILQ_FIRST(&tdbp->tdb_policy_head)) {
		TAILQ_REMOVE(&tdbp->tdb_policy_head, ipo, ipo_tdb_next);
		ipo->ipo_tdb = NULL;
		ipo->ipo_last_searched = 0; /* Force a re-search. */
	}

	/* Remove expiration timeouts. */
	tdbp->tdb_flags &= ~(TDBF_FIRSTUSE | TDBF_SOFT_FIRSTUSE | TDBF_TIMER |
	    TDBF_SOFT_TIMER);
	timeout_del(&tdbp->tdb_timer_tmo);
	timeout_del(&tdbp->tdb_first_tmo);
	timeout_del(&tdbp->tdb_stimer_tmo);
	timeout_del(&tdbp->tdb_sfirst_tmo);

	/* Drop the (refcounted) IDs reference. */
	if (tdbp->tdb_ids) {
		ipsp_ids_free(tdbp->tdb_ids);
		tdbp->tdb_ids = NULL;
	}

#if NPF > 0
	if (tdbp->tdb_tag) {
		pf_tag_unref(tdbp->tdb_tag);
		tdbp->tdb_tag = 0;
	}
#endif

	/* Detach bundled TDBs that still point back at us. */
	if ((tdbp->tdb_onext) && (tdbp->tdb_onext->tdb_inext == tdbp))
		tdbp->tdb_onext->tdb_inext = NULL;

	if ((tdbp->tdb_inext) && (tdbp->tdb_inext->tdb_onext == tdbp))
		tdbp->tdb_inext->tdb_onext = NULL;

	free(tdbp, M_TDB, 0);
}

/*
 * Do further initializations of a TDB.
 */
int
tdb_init(struct tdb *tdbp, u_int16_t alg, struct ipsecinit *ii)
{
	struct xformsw *xsp;
	int err;
#ifdef ENCDEBUG
	char buf[INET6_ADDRSTRLEN];
#endif

	/* Find the transform matching "alg" and let it set up the TDB. */
	for (xsp = xformsw; xsp < xformswNXFORMSW; xsp++) {
		if (xsp->xf_type == alg) {
			err = (*(xsp->xf_init))(tdbp, xsp, ii);
			return err;
		}
	}

	DPRINTF(("tdb_init(): no alg %d for spi %08x, addr %s, proto %d\n",
	    alg, ntohl(tdbp->tdb_spi), ipsp_address(&tdbp->tdb_dst, buf,
	    sizeof(buf)), tdbp->tdb_sproto));

	return EINVAL;
}

#ifdef ENCDEBUG
/* Return a printable string for the address. */
const char *
ipsp_address(union sockaddr_union *sa, char *buf, socklen_t size)
{
	switch (sa->sa.sa_family) {
	case AF_INET:
		return inet_ntop(AF_INET, &sa->sin.sin_addr,
		    buf, (size_t)size);

#ifdef INET6
	case AF_INET6:
		return inet_ntop(AF_INET6, &sa->sin6.sin6_addr,
		    buf, (size_t)size);
#endif /* INET6 */

	default:
		return "(unknown address family)";
	}
}
#endif /* ENCDEBUG */

/*
 * Check whether an IP{4,6} address is unspecified.
 * Unknown or unset address families count as unspecified.
 */
int
ipsp_is_unspecified(union sockaddr_union addr)
{
	switch (addr.sa.sa_family) {
	case AF_INET:
		if (addr.sin.sin_addr.s_addr == INADDR_ANY)
			return 1;
		else
			return 0;

#ifdef INET6
	case AF_INET6:
		if (IN6_IS_ADDR_UNSPECIFIED(&addr.sin6.sin6_addr))
			return 1;
		else
			return 0;
#endif /* INET6 */

	case 0: /* No family set. */
	default:
		return 1;
	}
}

/*
 * ids objects are interned by ipsp_ids_insert() (equal IDs share one
 * refcounted object), so pointer equality suffices here.
 */
int
ipsp_ids_match(struct ipsec_ids *a, struct ipsec_ids *b)
{
	return a == b;
}

/*
 * Intern "ids": if an equal entry already exists, take a reference and
 * return it; otherwise assign a fresh flow number and insert.  Returns
 * NULL only if the flow-number space is exhausted.
 */
struct ipsec_ids *
ipsp_ids_insert(struct ipsec_ids *ids)
{
	struct ipsec_ids *found;
	u_int32_t start_flow;

	found = RB_INSERT(ipsec_ids_tree, &ipsec_ids_tree, ids);
	if (found) {
		/* if refcount was zero, then timeout is running */
		if (found->id_refcount++ == 0)
			timeout_del(&found->id_timeout);
		DPRINTF(("%s: ids %p count %d\n", __func__,
		    found, found->id_refcount));
		return found;
	}
	ids->id_flow = start_flow = ipsec_ids_next_flow;
	if (++ipsec_ids_next_flow == 0)
		ipsec_ids_next_flow = 1;	/* flow 0 is reserved */
	while (RB_INSERT(ipsec_ids_flows, &ipsec_ids_flows, ids) != NULL) {
		/* Flow number already in use; try the next one. */
		ids->id_flow = ipsec_ids_next_flow;
		if (++ipsec_ids_next_flow == 0)
			ipsec_ids_next_flow = 1;
		/* Wrapped all the way around without a free number. */
		if (ipsec_ids_next_flow == start_flow) {
			DPRINTF(("ipsec_ids_next_flow exhausted %u\n",
			    ipsec_ids_next_flow));
			return NULL;
		}
	}
	ids->id_refcount = 1;
	DPRINTF(("%s: new ids %p flow %u\n", __func__, ids, ids->id_flow));
	timeout_set(&ids->id_timeout, ipsp_ids_timeout, ids);
	return ids;
}

/* Look up an interned ids object by its flow number. */
struct ipsec_ids *
ipsp_ids_lookup(u_int32_t ipsecflowinfo)
{
	struct ipsec_ids key;

	key.id_flow = ipsecflowinfo;
	return RB_FIND(ipsec_ids_flows, &ipsec_ids_flows, &key);
}

/* free ids only from delayed timeout */
void
ipsp_ids_timeout(void *arg)
{
	struct ipsec_ids *ids = arg;
	int s;

	DPRINTF(("%s: ids %p count %d\n", __func__, ids, ids->id_refcount));
	KASSERT(ids->id_refcount == 0);
	s = splsoftnet();
	RB_REMOVE(ipsec_ids_tree, &ipsec_ids_tree, ids);
	RB_REMOVE(ipsec_ids_flows, &ipsec_ids_flows, ids);
	free(ids->id_local, M_CREDENTIALS, 0);
	free(ids->id_remote, M_CREDENTIALS, 0);
	free(ids, M_CREDENTIALS, 0);
	splx(s);
}

/* decrements refcount, actual free happens
in timeout */ 964 void 965 ipsp_ids_free(struct ipsec_ids *ids) 966 { 967 /* 968 * If the refcount becomes zero, then a timeout is started. This 969 * timeout must be cancelled if refcount is increased from zero. 970 */ 971 DPRINTF(("%s: ids %p count %d\n", __func__, ids, ids->id_refcount)); 972 KASSERT(ids->id_refcount > 0); 973 if (--ids->id_refcount == 0) 974 timeout_add_sec(&ids->id_timeout, ipsec_ids_idle); 975 } 976 977 static int 978 ipsp_id_cmp(struct ipsec_id *a, struct ipsec_id *b) 979 { 980 if (a->type > b->type) 981 return 1; 982 if (a->type < b->type) 983 return -1; 984 if (a->len > b->len) 985 return 1; 986 if (a->len < b->len) 987 return -1; 988 return memcmp(a + 1, b + 1, a->len); 989 } 990 991 static int 992 ipsp_ids_cmp(struct ipsec_ids *a, struct ipsec_ids *b) 993 { 994 int ret; 995 996 ret = ipsp_id_cmp(a->id_remote, b->id_remote); 997 if (ret != 0) 998 return ret; 999 return ipsp_id_cmp(a->id_local, b->id_local); 1000 } 1001 1002 static int 1003 ipsp_ids_flow_cmp(struct ipsec_ids *a, struct ipsec_ids *b) 1004 { 1005 if (a->id_flow > b->id_flow) 1006 return 1; 1007 if (a->id_flow < b->id_flow) 1008 return -1; 1009 return 0; 1010 } 1011