/*	$OpenBSD: rde_lsdb.c,v 1.30 2010/03/01 08:55:45 claudio Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/tree.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "rde.h"
#include "log.h"

struct vertex	*vertex_get(struct lsa *, struct rde_nbr *, struct lsa_tree *);

int		 lsa_link_check(struct lsa *, u_int16_t);
int		 lsa_intra_a_pref_check(struct lsa *, u_int16_t);
int		 lsa_asext_check(struct lsa *, u_int16_t);
void		 lsa_timeout(int, short, void *);
void		 lsa_refresh(struct vertex *);
int		 lsa_equal(struct lsa *, struct lsa *);
int		 lsa_get_prefix(void *, u_int16_t, struct rt_prefix *);

RB_GENERATE(lsa_tree, vertex, entry, lsa_compare)

extern struct ospfd_conf	*rdeconf;

void
lsa_init(struct lsa_tree *t)
{
	RB_INIT(t);
}

/* order vertices by LSA type, then advertising router, then LS ID */
int
lsa_compare(struct vertex *a, struct vertex *b)
{
	if (a->type < b->type)
		return (-1);
	if (a->type > b->type)
		return (1);
	if (a->adv_rtr < b->adv_rtr)
		return (-1);
	if (a->adv_rtr > b->adv_rtr)
		return (1);
	if (a->ls_id < b->ls_id)
		return (-1);
	if (a->ls_id > b->ls_id)
		return (1);
	return (0);
}

struct vertex *
vertex_get(struct lsa *lsa, struct rde_nbr *nbr, struct lsa_tree *tree)
{
	struct vertex	*v;
	struct timespec	 tp;

	if ((v = calloc(1, sizeof(struct vertex))) == NULL)
		fatal(NULL);
	TAILQ_INIT(&v->nexthop);
	v->area = nbr->area;
	v->peerid = nbr->peerid;
	v->lsa = lsa;
	clock_gettime(CLOCK_MONOTONIC, &tp);
	v->changed = v->stamp = tp.tv_sec;
	v->cost = LS_INFINITY;
	v->ls_id = ntohl(lsa->hdr.ls_id);
	v->adv_rtr = ntohl(lsa->hdr.adv_rtr);
	v->type = ntohs(lsa->hdr.type);
	v->lsa_tree = tree;

	if (!nbr->self)
		v->flooded = 1;	/* XXX fix me */
	v->self = nbr->self;

	evtimer_set(&v->ev, lsa_timeout, v);

	return (v);
}

void
vertex_free(struct vertex *v)
{
	RB_REMOVE(lsa_tree, v->lsa_tree, v);
	(void)evtimer_del(&v->ev);
	free(v->lsa);
	free(v);
}

/* returns -1 if a is older, 1 if newer and 0 if equal to b */
int
lsa_newer(struct lsa_hdr *a, struct lsa_hdr *b)
{
	int32_t		 a32, b32;
	u_int16_t	 a16, b16;
	int		 i;

	if (a == NULL)
		return (-1);
	if (b == NULL)
		return (1);

	/*
	 * The sequence number is defined as a signed 32-bit integer;
	 * no idea how the IETF came up with such a stupid idea.
	 */
	a32 = (int32_t)ntohl(a->seq_num);
	b32 = (int32_t)ntohl(b->seq_num);

	if (a32 > b32)
		return (1);
	if (a32 < b32)
		return (-1);

	a16 = ntohs(a->ls_chksum);
	b16 = ntohs(b->ls_chksum);

	if (a16 > b16)
		return (1);
	if (a16 < b16)
		return (-1);

	a16 = ntohs(a->age);
	b16 = ntohs(b->age);

	if (a16 >= MAX_AGE && b16 >= MAX_AGE)
		return (0);
	if (b16 >= MAX_AGE)
		return (-1);
	if (a16 >= MAX_AGE)
		return (1);

	i = b16 - a16;
	if (abs(i) > MAX_AGE_DIFF)
		return (i > 0 ? 1 : -1);

	return (0);
}
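
/*
 * Sanity check a received LSA: header length, checksum, age and sequence
 * number, followed by per-type payload checks.  Returns 1 if the LSA should
 * be processed further, 0 if it must be dropped (a MaxAge LSA that is not
 * in the database is acked directly instead of being added).
 */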
int
lsa_check(struct rde_nbr *nbr, struct lsa *lsa, u_int16_t len)
{
	u_int32_t	 metric;

	if (len < sizeof(lsa->hdr)) {
		log_warnx("lsa_check: bad packet size");
		return (0);
	}
	if (ntohs(lsa->hdr.len) != len) {
		log_warnx("lsa_check: bad packet size");
		return (0);
	}

	if (iso_cksum(lsa, len, 0)) {
		log_warnx("lsa_check: bad packet checksum");
		return (0);
	}

	/* invalid ages */
	if ((ntohs(lsa->hdr.age) < 1 && !nbr->self) ||
	    ntohs(lsa->hdr.age) > MAX_AGE) {
		log_warnx("lsa_check: bad age");
		return (0);
	}

	/* invalid sequence number */
	if (ntohl(lsa->hdr.seq_num) == RESV_SEQ_NUM) {
		log_warnx("lsa_check: bad seq num");
		return (0);
	}

	switch (ntohs(lsa->hdr.type)) {
	case LSA_TYPE_LINK:
		if (!lsa_link_check(lsa, len))
			return (0);
		break;
	case LSA_TYPE_ROUTER:
		if (len < sizeof(lsa->hdr) + sizeof(struct lsa_rtr)) {
			log_warnx("lsa_check: bad LSA rtr packet");
			return (0);
		}
		len -= sizeof(lsa->hdr) + sizeof(struct lsa_rtr);
		if (len % sizeof(struct lsa_rtr_link)) {
			log_warnx("lsa_check: bad LSA rtr packet");
			return (0);
		}
		break;
	case LSA_TYPE_NETWORK:
		if ((len % sizeof(u_int32_t)) ||
		    len < sizeof(lsa->hdr) + sizeof(u_int32_t)) {
			return (0);
		}
		break;
	case LSA_TYPE_INTER_A_PREFIX:
		if (len < sizeof(lsa->hdr) + sizeof(lsa->data.pref_sum)) {
			log_warnx("lsa_check: bad LSA prefix summary packet");
			return (0);
		}
		metric = ntohl(lsa->data.pref_sum.metric);
		if (metric & ~LSA_METRIC_MASK) {
			log_warnx("lsa_check: bad LSA summary metric");
			return (0);
		}
		if (lsa_get_prefix(((char *)lsa) + sizeof(lsa->hdr) +
		    sizeof(lsa->data.pref_sum),
		    len - sizeof(lsa->hdr) - sizeof(lsa->data.pref_sum),
		    NULL) == -1) {
			log_warnx("lsa_check: "
			    "invalid LSA prefix summary packet");
			return (0);
		}
		break;
	case LSA_TYPE_INTER_A_ROUTER:
		if (len < sizeof(lsa->hdr) + sizeof(lsa->data.rtr_sum)) {
			log_warnx("lsa_check: bad LSA router summary packet");
			return (0);
		}
		metric = ntohl(lsa->data.rtr_sum.metric);
		if (metric & ~LSA_METRIC_MASK) {
			log_warnx("lsa_check: bad LSA summary metric");
			return (0);
		}
		break;
	case LSA_TYPE_INTRA_A_PREFIX:
		if (!lsa_intra_a_pref_check(lsa, len))
			return (0);
		break;
	case LSA_TYPE_EXTERNAL:
		/* AS-external-LSAs are silently discarded in stub areas */
		if (nbr->area->stub)
			return (0);
		if (!lsa_asext_check(lsa, len))
			return (0);
		break;
	default:
		log_warnx("lsa_check: unknown type %x", ntohs(lsa->hdr.type));
		return (0);
	}

	/* MaxAge handling */
	if (lsa->hdr.age == htons(MAX_AGE) && !nbr->self && lsa_find(nbr->iface,
	    lsa->hdr.type, lsa->hdr.ls_id, lsa->hdr.adv_rtr) == NULL &&
	    !rde_nbr_loading(nbr->area)) {
		/*
		 * If no neighbor is in state Exchange or Loading,
		 * ack the LSA but don't add it.  Needs to be a direct ack.
		 */
		rde_imsg_compose_ospfe(IMSG_LS_ACK, nbr->peerid, 0, &lsa->hdr,
		    sizeof(struct lsa_hdr));
		return (0);
	}

	return (1);
}

int
lsa_link_check(struct lsa *lsa, u_int16_t len)
{
	char			*buf = (char *)lsa;
	struct lsa_link		*llink;
	u_int32_t		 i, off, npref;
	int			 rv;

	llink = (struct lsa_link *)(buf + sizeof(lsa->hdr));
	off = sizeof(lsa->hdr) + sizeof(struct lsa_link);
	if (off > len) {
		log_warnx("lsa_link_check: invalid LSA link packet, "
		    "short header");
		return (0);
	}

	len -= off;
	npref = ntohl(llink->numprefix);

	for (i = 0; i < npref; i++) {
		rv = lsa_get_prefix(buf + off, len, NULL);
		if (rv == -1) {
			log_warnx("lsa_link_check: invalid LSA link packet");
			return (0);
		}
		off += rv;
		len -= rv;
	}

	return (1);
}

int
lsa_intra_a_pref_check(struct lsa *lsa, u_int16_t len)
{
	char				*buf = (char *)lsa;
	struct lsa_intra_prefix		*iap;
	u_int32_t			 i, off, npref;
	int				 rv;

	iap = (struct lsa_intra_prefix *)(buf + sizeof(lsa->hdr));
	off = sizeof(lsa->hdr) + sizeof(struct lsa_intra_prefix);
	if (off > len) {
		log_warnx("lsa_intra_a_pref_check: "
		    "invalid LSA intra area prefix packet, short header");
		return (0);
	}

	len -= off;
	npref = ntohs(iap->numprefix);

	for (i = 0; i < npref; i++) {
		rv = lsa_get_prefix(buf + off, len, NULL);
		if (rv == -1) {
			log_warnx("lsa_intra_a_pref_check: "
			    "invalid LSA intra area prefix packet");
			return (0);
		}
		off += rv;
		len -= rv;
	}

	return (1);
}
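
/*
 * Validate an AS-external-LSA: the advertised prefix plus the optional
 * forwarding address, external route tag and referenced LS ID (selected
 * by the F/T bits and the referenced LS type) must add up exactly to the
 * LSA length.
 */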
int
lsa_asext_check(struct lsa *lsa, u_int16_t len)
{
	char			*buf = (char *)lsa;
	struct lsa_asext	*asext;
	struct in6_addr		 fw_addr;
	u_int32_t		 metric;
	u_int16_t		 ref_ls_type;
	int			 rv;
	u_int16_t		 total_len;

	asext = (struct lsa_asext *)(buf + sizeof(lsa->hdr));

	if ((len % sizeof(u_int32_t)) ||
	    len < sizeof(lsa->hdr) + sizeof(*asext)) {
		log_warnx("lsa_asext_check: bad LSA as-external packet");
		return (0);
	}

	total_len = sizeof(lsa->hdr) + sizeof(*asext);
	rv = lsa_get_prefix(&asext->prefix, len, NULL);
	if (rv == -1) {
		log_warnx("lsa_asext_check: bad LSA as-external packet");
		return (0);
	}
	total_len += rv - sizeof(struct lsa_prefix);

	metric = ntohl(asext->metric);
	if (metric & LSA_ASEXT_F_FLAG) {
		if (total_len + sizeof(fw_addr) < len) {
			bcopy(buf + total_len, &fw_addr, sizeof(fw_addr));
			if (IN6_IS_ADDR_UNSPECIFIED(&fw_addr) ||
			    IN6_IS_ADDR_LINKLOCAL(&fw_addr)) {
				log_warnx("lsa_asext_check: bad LSA "
				    "as-external forwarding address");
				return (0);
			}
		}
		total_len += sizeof(fw_addr);
	}

	if (metric & LSA_ASEXT_T_FLAG)
		total_len += sizeof(u_int32_t);

	ref_ls_type = asext->prefix.metric;
	if (ref_ls_type != 0)
		total_len += sizeof(u_int32_t);

	if (len != total_len) {
		log_warnx("lsa_asext_check: bad LSA as-external length");
		return (0);
	}

	return (1);
}

int
lsa_self(struct lsa *lsa)
{
	return rde_router_id() == lsa->hdr.adv_rtr;
}

void
lsa_flush(struct rde_nbr *nbr, struct lsa *lsa)
{
	struct lsa	*copy;

	/*
	 * The LSA may not be altered because the caller may still
	 * use it, so a copy needs to be added to the LSDB.
	 * The copy will be reflooded via the default timeout handler.
	 */
	if ((copy = malloc(ntohs(lsa->hdr.len))) == NULL)
		fatal("lsa_flush");
	memcpy(copy, lsa, ntohs(lsa->hdr.len));
	copy->hdr.age = htons(MAX_AGE);
	(void)lsa_add(rde_nbr_self(nbr->area), copy);
}

void
lsa_reflood(struct vertex *v, struct lsa *new)
{
	/*
	 * We only need to create a new instance by setting the LSA
	 * sequence number equal to the one of 'new' and calling
	 * lsa_refresh().  Actual flooding will be done by the caller.
	 */
	v->lsa->hdr.seq_num = new->hdr.seq_num;
	lsa_refresh(v);
}
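
/*
 * Insert an LSA into the LSDB tree matching its flooding scope (AS, area
 * or link-local).  An existing instance is replaced and the SPF timer is
 * started if the contents changed.  Returns 1 if the update was delayed
 * because the old instance is still within its hold time, 0 otherwise.
 */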
int
lsa_add(struct rde_nbr *nbr, struct lsa *lsa)
{
	struct lsa_tree	*tree;
	struct vertex	*new, *old;
	struct timeval	 tv, now, res;

	if (LSA_IS_SCOPE_AS(ntohs(lsa->hdr.type)))
		tree = &asext_tree;
	else if (LSA_IS_SCOPE_AREA(ntohs(lsa->hdr.type)))
		tree = &nbr->area->lsa_tree;
	else if (LSA_IS_SCOPE_LLOCAL(ntohs(lsa->hdr.type)))
		tree = &nbr->iface->lsa_tree;
	else
		fatalx("unknown scope type");

	new = vertex_get(lsa, nbr, tree);
	old = RB_INSERT(lsa_tree, tree, new);

	if (old != NULL) {
		if (old->deleted && evtimer_pending(&old->ev, &tv)) {
			/* new update added before hold time expired */
			gettimeofday(&now, NULL);
			timersub(&tv, &now, &res);

			/* remove old LSA and insert new LSA with delay */
			vertex_free(old);
			RB_INSERT(lsa_tree, tree, new);
			new->deleted = 1;

			if (evtimer_add(&new->ev, &res) != 0)
				fatal("lsa_add");
			return (1);
		}
		if (!lsa_equal(new->lsa, old->lsa)) {
			if (ntohs(lsa->hdr.type) != LSA_TYPE_EXTERNAL)
				nbr->area->dirty = 1;
			start_spf_timer();
		}
		vertex_free(old);
		RB_INSERT(lsa_tree, tree, new);
	} else {
		if (ntohs(lsa->hdr.type) != LSA_TYPE_EXTERNAL)
			nbr->area->dirty = 1;
		start_spf_timer();
	}

	/* timeout handling either MAX_AGE or LS_REFRESH_TIME */
	timerclear(&tv);

	if (nbr->self && ntohs(new->lsa->hdr.age) == DEFAULT_AGE)
		tv.tv_sec = LS_REFRESH_TIME;
	else
		tv.tv_sec = MAX_AGE - ntohs(new->lsa->hdr.age);

	if (evtimer_add(&new->ev, &tv) != 0)
		fatal("lsa_add");
	return (0);
}

void
lsa_del(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct vertex	*v;
	struct timeval	 tv;

	v = lsa_find(nbr->iface, lsa->type, lsa->ls_id, lsa->adv_rtr);
	if (v == NULL)
		return;

	v->deleted = 1;
	/* hold time to make sure that a new LSA is not added prematurely */
	timerclear(&tv);
	tv.tv_sec = MIN_LS_INTERVAL;
	if (evtimer_add(&v->ev, &tv) == -1)
		fatal("lsa_del");
}

void
lsa_age(struct vertex *v)
{
	struct timespec	 tp;
	time_t		 now;
	int		 d;
	u_int16_t	 age;

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	d = now - v->stamp;
	/* set stamp so that at least new calls work */
	v->stamp = now;

	if (d < 0) {
		log_warnx("lsa_age: time went backwards");
		return;
	}

	age = ntohs(v->lsa->hdr.age);
	if (age + d > MAX_AGE)
		age = MAX_AGE;
	else
		age += d;

	v->lsa->hdr.age = htons(age);
}
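
/*
 * Look up an LSA in the tree matching the flooding scope encoded in its
 * type: the global AS-external tree, the tree of the interface's area or
 * the interface's own link-local tree.
 */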
struct vertex *
lsa_find(struct iface *iface, u_int16_t type, u_int32_t ls_id,
    u_int32_t adv_rtr)
{
	struct lsa_tree	*tree;

	if (LSA_IS_SCOPE_AS(ntohs(type)))
		tree = &asext_tree;
	else if (LSA_IS_SCOPE_AREA(ntohs(type))) {
		struct area	*area;

		if ((area = area_find(rdeconf, iface->area_id)) == NULL)
			fatalx("interface lost area");
		tree = &area->lsa_tree;
	} else if (LSA_IS_SCOPE_LLOCAL(ntohs(type)))
		tree = &iface->lsa_tree;
	else
		fatalx("unknown scope type");

	return lsa_find_tree(tree, type, ls_id, adv_rtr);
}

struct vertex *
lsa_find_tree(struct lsa_tree *tree, u_int16_t type, u_int32_t ls_id,
    u_int32_t adv_rtr)
{
	struct vertex	 key;
	struct vertex	*v;

	key.ls_id = ntohl(ls_id);
	key.adv_rtr = ntohl(adv_rtr);
	key.type = ntohs(type);

	v = RB_FIND(lsa_tree, tree, &key);

	/* LSAs that are deleted are not findable */
	if (v && v->deleted)
		return (NULL);

	if (v)
		lsa_age(v);

	return (v);
}

struct vertex *
lsa_find_rtr(struct area *area, u_int32_t rtr_id)
{
	return lsa_find_rtr_frag(area, rtr_id, 0);
}

/* return the n-th (zero-based) router-LSA fragment originated by rtr_id */
struct vertex *
lsa_find_rtr_frag(struct area *area, u_int32_t rtr_id, unsigned int n)
{
	struct vertex	*v;
	struct vertex	 key;
	unsigned int	 i;

	key.ls_id = 0;
	key.adv_rtr = ntohl(rtr_id);
	key.type = LSA_TYPE_ROUTER;

	i = 0;
	v = RB_NFIND(lsa_tree, &area->lsa_tree, &key);
	while (v) {
		if (v->type != LSA_TYPE_ROUTER ||
		    v->adv_rtr != ntohl(rtr_id)) {
			/* no more interesting LSAs */
			v = NULL;
			break;
		}
		if (!v->deleted) {
			if (i >= n)
				break;
			i++;
		}
		v = RB_NEXT(lsa_tree, &area->lsa_tree, v);
	}

	if (v) {
		if (i == n)
			lsa_age(v);
		else
			v = NULL;
	}

	return (v);
}
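
/*
 * Find an LS ID for a self-originated LSA of the given type and advertising
 * router: if an LSA with matching contents (according to cmp) already
 * exists, its LS ID is reused, otherwise the lowest unused LS ID is
 * returned.  The result is in network byte order.
 */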
u_int32_t
lsa_find_lsid(struct lsa_tree *tree, u_int16_t type, u_int32_t adv_rtr,
    int (*cmp)(struct lsa *, struct lsa *), struct lsa *lsa)
{
#define MIN(x, y)	((x) < (y) ? (x) : (y))
	struct vertex	*v;
	struct vertex	 key;
	u_int32_t	 min, cur;

	key.ls_id = 0;
	key.adv_rtr = ntohl(adv_rtr);
	key.type = ntohs(type);

	cur = 0;
	min = 0xffffffffU;
	v = RB_NFIND(lsa_tree, tree, &key);
	while (v) {
		if (v->type != key.type ||
		    v->adv_rtr != key.adv_rtr) {
			/* no more interesting LSAs */
			min = MIN(min, cur + 1);
			return (htonl(min));
		}
		if (cmp(lsa, v->lsa) == 0) {
			/* match, return this ls_id */
			return (htonl(v->ls_id));
		}
		if (v->ls_id > cur + 1)
			min = cur + 1;
		cur = v->ls_id;
		if (cur + 1 < cur)
			fatalx("King Bula sez: somebody got too many LSAs");
		v = RB_NEXT(lsa_tree, tree, v);
	}
	min = MIN(min, cur + 1);
	return (htonl(min));
#undef MIN
}

u_int16_t
lsa_num_links(struct vertex *v)
{
	unsigned int	 n = 1;
	u_int16_t	 nlinks = 0;

	switch (v->type) {
	case LSA_TYPE_ROUTER:
		/* add up the links of all fragments of this router-LSA */
		do {
			nlinks += ((ntohs(v->lsa->hdr.len) -
			    sizeof(struct lsa_hdr) - sizeof(struct lsa_rtr)) /
			    sizeof(struct lsa_rtr_link));
			v = lsa_find_rtr_frag(v->area, htonl(v->adv_rtr), n++);
		} while (v);
		return nlinks;
	case LSA_TYPE_NETWORK:
		return ((ntohs(v->lsa->hdr.len) - sizeof(struct lsa_hdr) -
		    sizeof(struct lsa_net)) / sizeof(struct lsa_net_link));
	default:
		fatalx("lsa_num_links: invalid LSA type");
	}

	return (0);
}

void
lsa_snap(struct rde_nbr *nbr, u_int32_t peerid)
{
	struct lsa_tree	*tree = &nbr->area->lsa_tree;
	struct vertex	*v;

	do {
		RB_FOREACH(v, lsa_tree, tree) {
			if (v->deleted)
				continue;
			lsa_age(v);
			if (ntohs(v->lsa->hdr.age) >= MAX_AGE) {
				rde_imsg_compose_ospfe(IMSG_LS_UPD, peerid,
				    0, &v->lsa->hdr, ntohs(v->lsa->hdr.len));
			} else {
				rde_imsg_compose_ospfe(IMSG_DB_SNAPSHOT, peerid,
				    0, &v->lsa->hdr, sizeof(struct lsa_hdr));
			}
		}
		if (tree == &asext_tree)
			break;
		if (tree == &nbr->area->lsa_tree) {
			tree = &nbr->iface->lsa_tree;
			continue;
		} else
			tree = &asext_tree;
	} while (1);
}

void
lsa_dump(struct lsa_tree *tree, int imsg_type, pid_t pid)
{
	struct vertex	*v;

	RB_FOREACH(v, lsa_tree, tree) {
		if (v->deleted)
			continue;
		lsa_age(v);
		switch (imsg_type) {
		case IMSG_CTL_SHOW_DATABASE:
			rde_imsg_compose_ospfe(IMSG_CTL_SHOW_DATABASE, 0, pid,
			    &v->lsa->hdr, ntohs(v->lsa->hdr.len));
			continue;
		case IMSG_CTL_SHOW_DB_SELF:
			if (v->lsa->hdr.adv_rtr == rde_router_id())
				break;
			continue;
		case IMSG_CTL_SHOW_DB_EXT:
			if (v->type == LSA_TYPE_EXTERNAL)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_LINK:
			if (v->type == LSA_TYPE_LINK)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_NET:
			if (v->type == LSA_TYPE_NETWORK)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_RTR:
			if (v->type == LSA_TYPE_ROUTER)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_INTRA:
			if (v->type == LSA_TYPE_INTRA_A_PREFIX)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_SUM:
			if (v->type == LSA_TYPE_INTER_A_PREFIX)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_ASBR:
			if (v->type == LSA_TYPE_INTER_A_ROUTER)
				break;
			continue;
		default:
			log_warnx("lsa_dump: unknown imsg type");
			return;
		}
		rde_imsg_compose_ospfe(imsg_type, 0, pid, &v->lsa->hdr,
		    ntohs(v->lsa->hdr.len));
	}
}

/* ARGSUSED */
void
lsa_timeout(int fd, short event, void *bula)
{
	struct vertex	*v = bula;
	struct timeval	 tv;

	lsa_age(v);

	if (v->deleted) {
		if (ntohs(v->lsa->hdr.age) >= MAX_AGE) {
			vertex_free(v);
		} else {
			v->deleted = 0;

			/* schedule recalculation of the RIB */
			if (v->type != LSA_TYPE_EXTERNAL)
				v->area->dirty = 1;
			start_spf_timer();

			rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
			    v->lsa, ntohs(v->lsa->hdr.len));

			/* timeout handling either MAX_AGE or LS_REFRESH_TIME */
			timerclear(&tv);
			if (v->self)
				tv.tv_sec = LS_REFRESH_TIME;
			else
				tv.tv_sec = MAX_AGE - ntohs(v->lsa->hdr.age);

			if (evtimer_add(&v->ev, &tv) != 0)
				fatal("lsa_timeout");
		}
		return;
	}

	if (v->self && ntohs(v->lsa->hdr.age) < MAX_AGE)
		lsa_refresh(v);

	rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
	    v->lsa, ntohs(v->lsa->hdr.len));
}

void
lsa_refresh(struct vertex *v)
{
	struct timeval	 tv;
	struct timespec	 tp;
	u_int32_t	 seqnum;
	u_int16_t	 len;

	/* refresh LSA by increasing the sequence number by one */
	v->lsa->hdr.age = htons(DEFAULT_AGE);
	seqnum = ntohl(v->lsa->hdr.seq_num);
	if (seqnum++ == MAX_SEQ_NUM)
		/* XXX fix me */
		fatalx("sequence number wrapping");
	v->lsa->hdr.seq_num = htonl(seqnum);

	/* recalculate checksum */
	len = ntohs(v->lsa->hdr.len);
	v->lsa->hdr.ls_chksum = 0;
	v->lsa->hdr.ls_chksum = htons(iso_cksum(v->lsa, len, LS_CKSUM_OFFSET));

	clock_gettime(CLOCK_MONOTONIC, &tp);
	v->changed = v->stamp = tp.tv_sec;

	timerclear(&tv);
	tv.tv_sec = LS_REFRESH_TIME;
	if (evtimer_add(&v->ev, &tv) == -1)
		fatal("lsa_refresh");
}
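
/*
 * Merge a new self-originated LSA into the database: if no instance exists
 * it is added and flooded right away, otherwise the stored instance is
 * replaced and reflooded via lsa_timeout(), honouring MIN_LS_INTERVAL.
 */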
void
lsa_merge(struct rde_nbr *nbr, struct lsa *lsa, struct vertex *v)
{
	struct timeval	 tv;
	struct timespec	 tp;
	time_t		 now;
	u_int16_t	 len;

	if (v == NULL) {
		if (lsa_add(nbr, lsa))
			/* delayed update */
			return;
		rde_imsg_compose_ospfe(IMSG_LS_FLOOD, nbr->peerid, 0,
		    lsa, ntohs(lsa->hdr.len));
		return;
	}

	/* set the seq_num to the current one. lsa_refresh() will do the ++ */
	lsa->hdr.seq_num = v->lsa->hdr.seq_num;
	/* recalculate checksum */
	len = ntohs(lsa->hdr.len);
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	/* compare LSAs; most header fields are equal so don't check them */
	if (lsa_equal(lsa, v->lsa)) {
		free(lsa);
		return;
	}

	/* overwrite the LSA, all other fields are unaffected */
	free(v->lsa);
	v->lsa = lsa;
	start_spf_timer();
	if (v->type != LSA_TYPE_EXTERNAL)
		nbr->area->dirty = 1;

	/* set correct timeout for reflooding the LSA */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;
	timerclear(&tv);
	if (v->changed + MIN_LS_INTERVAL >= now)
		tv.tv_sec = MIN_LS_INTERVAL;
	if (evtimer_add(&v->ev, &tv) == -1)
		fatal("lsa_merge");
}

void
lsa_remove_invalid_sums(struct area *area)
{
	struct lsa_tree	*tree = &area->lsa_tree;
	struct vertex	*v, *nv;

	/* XXX speed me up */
	for (v = RB_MIN(lsa_tree, tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, tree, v);
		if ((v->type == LSA_TYPE_INTER_A_PREFIX ||
		    v->type == LSA_TYPE_INTER_A_ROUTER) &&
		    v->self && v->cost == LS_INFINITY &&
		    v->deleted == 0) {
			/*
			 * age the LSA and call lsa_timeout() which will
			 * actually remove it from the database.
			 */
			v->lsa->hdr.age = htons(MAX_AGE);
			lsa_timeout(0, 0, v);
		}
	}
}

int
lsa_equal(struct lsa *a, struct lsa *b)
{
	/*
	 * Compare LSAs that already have the same type, adv_rtr and ls_id,
	 * so not all header fields need to be compared.
	 */
	if (a == NULL || b == NULL)
		return (0);
	if (a->hdr.len != b->hdr.len)
		return (0);
	/* LSAs with age MAX_AGE are never equal */
	if (a->hdr.age == htons(MAX_AGE) || b->hdr.age == htons(MAX_AGE))
		return (0);
	if (memcmp(&a->data, &b->data, ntohs(a->hdr.len) -
	    sizeof(struct lsa_hdr)))
		return (0);

	return (1);
}

int
lsa_get_prefix(void *buf, u_int16_t len, struct rt_prefix *p)
{
	struct lsa_prefix	*lp = buf;
	u_int32_t		*buf32, *addr = NULL;
	u_int8_t		 prefixlen;
	u_int16_t		 consumed = 0;

	if (len < sizeof(*lp))
		return (-1);

	prefixlen = lp->prefixlen;
	/* a prefix length larger than 128 is invalid and would overflow p */
	if (prefixlen > 128)
		return (-1);

	if (p) {
		bzero(p, sizeof(*p));
		p->prefixlen = lp->prefixlen;
		p->options = lp->options;
		p->metric = ntohs(lp->metric);
		addr = (u_int32_t *)&p->prefix;
	}

	buf32 = (u_int32_t *)(lp + 1);
	consumed += sizeof(*lp);

	/* the address prefix is padded out to a multiple of 32 bits */
	for (; prefixlen > 0; prefixlen = prefixlen > 32 ? prefixlen - 32 : 0) {
		if (len < consumed + sizeof(u_int32_t))
			return (-1);
		if (addr)
			*addr++ = *buf32++;
		consumed += sizeof(u_int32_t);
	}

	return (consumed);
}