/*	$OpenBSD: rde_spf.c,v 1.75 2012/09/18 18:58:56 bluhm Exp $ */

/*
 * Copyright (c) 2005 Esben Norby <norby@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <stdlib.h>

#include "ospfd.h"
#include "ospf.h"
#include "log.h"
#include "rde.h"

extern struct ospfd_conf	*rdeconf;
TAILQ_HEAD(, vertex)		 cand_list;
RB_HEAD(rt_tree, rt_node)	 rt;
RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)
RB_GENERATE(rt_tree, rt_node, entry, rt_compare)
struct vertex			*spf_root = NULL;

void		 calc_nexthop(struct vertex *, struct vertex *,
		     struct area *, struct lsa_rtr_link *);
void		 rt_nexthop_clear(struct rt_node *);
void		 rt_nexthop_add(struct rt_node *, struct v_nexthead *, u_int8_t,
		     struct in_addr);
void		 rt_update(struct in_addr, u_int8_t, struct v_nexthead *, u_int8_t,
		     u_int32_t, u_int32_t, struct in_addr, struct in_addr,
		     enum path_type, enum dst_type, u_int8_t, u_int32_t);
void		 rt_invalidate(struct area *);
struct lsa_rtr_link	*get_rtr_link(struct vertex *, int);
struct lsa_net_link	*get_net_link(struct vertex *, int);
int		 linked(struct vertex *, struct vertex *);

void
spf_calc(struct area *area)
{
	struct vertex		*v, *w;
	struct lsa_rtr_link	*rtr_link = NULL;
	struct lsa_net_link	*net_link;
	u_int32_t		 d;
	int			 i;
	struct in_addr		 addr;

	/* clear SPF tree */
	spf_tree_clr(area);
	cand_list_clr();

	/* initialize SPF tree */
	if ((v = spf_root = lsa_find_area(area, LSA_TYPE_ROUTER,
	    rde_router_id(), rde_router_id())) == NULL)
		/* empty area because no interface is active */
		return;

	area->transit = 0;
	spf_root->cost = 0;
	w = NULL;

	/* calculate SPF tree */
	do {
		/* loop links */
		for (i = 0; i < lsa_num_links(v); i++) {
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				rtr_link = get_rtr_link(v, i);
				switch (rtr_link->type) {
				case LINK_TYPE_STUB_NET:
					/* skip */
					continue;
				case LINK_TYPE_POINTTOPOINT:
				case LINK_TYPE_VIRTUAL:
					/* find router LSA */
					w = lsa_find_area(area, LSA_TYPE_ROUTER,
					    rtr_link->id, rtr_link->id);
					break;
				case LINK_TYPE_TRANSIT_NET:
					/* find network LSA */
					w = lsa_find_net(area, rtr_link->id);
					break;
				default:
					fatalx("spf_calc: invalid link type");
				}
				break;
			case LSA_TYPE_NETWORK:
				net_link = get_net_link(v, i);
				/* find router LSA */
				w = lsa_find_area(area, LSA_TYPE_ROUTER,
				    net_link->att_rtr, net_link->att_rtr);
				break;
			default:
				fatalx("spf_calc: invalid LSA type");
			}

			if (w == NULL)
				continue;

			if (w->lsa->hdr.age == MAX_AGE)
				continue;

			if (!linked(w, v)) {
				addr.s_addr = htonl(w->ls_id);
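				/*
				 * inet_ntoa() returns a static buffer, so
				 * the two addresses are logged in separate
				 * calls
				 */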
log_debug("spf_calc: w id %s type %d has ", 119 inet_ntoa(addr), w->type); 120 addr.s_addr = htonl(v->ls_id); 121 log_debug(" no link to v id %s type %d", 122 inet_ntoa(addr), v->type); 123 continue; 124 } 125 126 if (v->type == LSA_TYPE_ROUTER) 127 d = v->cost + ntohs(rtr_link->metric); 128 else 129 d = v->cost; 130 131 if (cand_list_present(w)) { 132 if (d > w->cost) 133 continue; 134 if (d < w->cost) { 135 w->cost = d; 136 vertex_nexthop_clear(w); 137 calc_nexthop(w, v, area, rtr_link); 138 /* 139 * need to readd to candidate list 140 * because the list is sorted 141 */ 142 TAILQ_REMOVE(&cand_list, w, cand); 143 cand_list_add(w); 144 } else 145 /* equal cost path */ 146 calc_nexthop(w, v, area, rtr_link); 147 } else if (w->cost == LS_INFINITY && d < LS_INFINITY) { 148 w->cost = d; 149 150 vertex_nexthop_clear(w); 151 calc_nexthop(w, v, area, rtr_link); 152 cand_list_add(w); 153 } 154 } 155 156 /* get next vertex */ 157 v = cand_list_pop(); 158 w = NULL; 159 } while (v != NULL); 160 161 /* spf_dump(area); */ 162 log_debug("spf_calc: area %s calculated", 163 inet_ntoa(area->id)); 164 165 area->num_spf_calc++; 166 start_spf_timer(); 167 } 168 169 void 170 rt_calc(struct vertex *v, struct area *area, struct ospfd_conf *conf) 171 { 172 struct vertex *w; 173 struct v_nexthop *vn; 174 struct lsa_rtr_link *rtr_link = NULL; 175 int i; 176 struct in_addr addr, adv_rtr; 177 178 lsa_age(v); 179 if (ntohs(v->lsa->hdr.age) == MAX_AGE) 180 return; 181 182 switch (v->type) { 183 case LSA_TYPE_ROUTER: 184 /* stub networks */ 185 if (v->cost >= LS_INFINITY || TAILQ_EMPTY(&v->nexthop)) 186 return; 187 188 for (i = 0; i < lsa_num_links(v); i++) { 189 rtr_link = get_rtr_link(v, i); 190 if (rtr_link->type != LINK_TYPE_STUB_NET) 191 continue; 192 193 addr.s_addr = rtr_link->id; 194 adv_rtr.s_addr = htonl(v->adv_rtr); 195 196 rt_update(addr, mask2prefixlen(rtr_link->data), 197 &v->nexthop, v->type, 198 v->cost + ntohs(rtr_link->metric), 0, 199 area->id, adv_rtr, PT_INTRA_AREA, DT_NET, 200 v->lsa->data.rtr.flags, 0); 201 } 202 203 /* router, only add border and as-external routers */ 204 if ((v->lsa->data.rtr.flags & (OSPF_RTR_B | OSPF_RTR_E)) == 0) 205 return; 206 207 addr.s_addr = htonl(v->ls_id); 208 adv_rtr.s_addr = htonl(v->adv_rtr); 209 210 rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0, area->id, 211 adv_rtr, PT_INTRA_AREA, DT_RTR, v->lsa->data.rtr.flags, 0); 212 break; 213 case LSA_TYPE_NETWORK: 214 if (v->cost >= LS_INFINITY || TAILQ_EMPTY(&v->nexthop)) 215 return; 216 217 addr.s_addr = htonl(v->ls_id) & v->lsa->data.net.mask; 218 adv_rtr.s_addr = htonl(v->adv_rtr); 219 rt_update(addr, mask2prefixlen(v->lsa->data.net.mask), 220 &v->nexthop, v->type, v->cost, 0, area->id, adv_rtr, 221 PT_INTRA_AREA, DT_NET, 0, 0); 222 break; 223 case LSA_TYPE_SUM_NETWORK: 224 case LSA_TYPE_SUM_ROUTER: 225 /* if ABR only look at area 0.0.0.0 LSA */ 226 if (area_border_router(conf) && area->id.s_addr != INADDR_ANY) 227 return; 228 229 /* ignore self-originated stuff */ 230 if (v->self) 231 return; 232 233 /* TODO type 3 area address range check */ 234 235 if ((w = lsa_find_area(area, LSA_TYPE_ROUTER, 236 htonl(v->adv_rtr), 237 htonl(v->adv_rtr))) == NULL) 238 return; 239 240 /* copy nexthops */ 241 vertex_nexthop_clear(v); /* XXX needed ??? 
		TAILQ_FOREACH(vn, &w->nexthop, entry)
			vertex_nexthop_add(v, w, vn->nexthop.s_addr);

		v->cost = w->cost +
		    (ntohl(v->lsa->data.sum.metric) & LSA_METRIC_MASK);

		if (v->cost >= LS_INFINITY || TAILQ_EMPTY(&v->nexthop))
			return;

		adv_rtr.s_addr = htonl(v->adv_rtr);
		if (v->type == LSA_TYPE_SUM_NETWORK) {
			addr.s_addr = htonl(v->ls_id) & v->lsa->data.sum.mask;
			rt_update(addr, mask2prefixlen(v->lsa->data.sum.mask),
			    &v->nexthop, v->type, v->cost, 0, area->id, adv_rtr,
			    PT_INTER_AREA, DT_NET, 0, 0);
		} else {
			addr.s_addr = htonl(v->ls_id);
			rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0,
			    area->id, adv_rtr, PT_INTER_AREA, DT_RTR,
			    v->lsa->data.rtr.flags, 0);
		}

		break;
	case LSA_TYPE_AREA_OPAQ:
		/* nothing to calculate */
		break;
	default:
		/* as-external LSA are stored in a different tree */
		fatalx("rt_calc: invalid LSA type");
	}
}

void
asext_calc(struct vertex *v)
{
	struct rt_node		*r;
	struct rt_nexthop	*rn;
	u_int32_t		 cost2;
	struct in_addr		 addr, adv_rtr, a;
	enum path_type		 type;

	lsa_age(v);
	if (ntohs(v->lsa->hdr.age) == MAX_AGE ||
	    (ntohl(v->lsa->data.asext.metric) & LSA_METRIC_MASK) >=
	    LS_INFINITY)
		return;

	switch (v->type) {
	case LSA_TYPE_EXTERNAL:
		/* ignore self-originated stuff */
		if (v->self)
			return;

		if ((r = rt_lookup(DT_RTR, htonl(v->adv_rtr))) == NULL)
			return;

		/* XXX RFC1583Compatibility */
		if (v->lsa->data.asext.fw_addr != 0 &&
		    (r = rt_lookup(DT_NET, v->lsa->data.asext.fw_addr)) == NULL)
			return;

		if (v->lsa->data.asext.fw_addr != 0 &&
		    r->p_type != PT_INTRA_AREA &&
		    r->p_type != PT_INTER_AREA)
			return;

		if (ntohl(v->lsa->data.asext.metric) & LSA_ASEXT_E_FLAG) {
			v->cost = r->cost;
			cost2 = ntohl(v->lsa->data.asext.metric) &
			    LSA_METRIC_MASK;
			type = PT_TYPE2_EXT;
		} else {
			v->cost = r->cost + (ntohl(v->lsa->data.asext.metric) &
			    LSA_METRIC_MASK);
			cost2 = 0;
			type = PT_TYPE1_EXT;
		}

		a.s_addr = 0;
		adv_rtr.s_addr = htonl(v->adv_rtr);
		addr.s_addr = htonl(v->ls_id) & v->lsa->data.asext.mask;

		vertex_nexthop_clear(v);
		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->invalid)
				continue;

			/*
			 * if a fw_addr is specified and the nexthop
			 * is directly connected then it is possible to
			 * send traffic directly to fw_addr.
			 */
			if (v->lsa->data.asext.fw_addr != 0 && rn->connected)
				vertex_nexthop_add(v, NULL,
				    v->lsa->data.asext.fw_addr);
			else
				vertex_nexthop_add(v, NULL, rn->nexthop.s_addr);
		}

		rt_update(addr, mask2prefixlen(v->lsa->data.asext.mask),
		    &v->nexthop, v->type, v->cost, cost2, a, adv_rtr, type,
		    DT_NET, 0, ntohl(v->lsa->data.asext.ext_tag));
		break;
	case LSA_TYPE_AS_OPAQ:
		/* nothing to calculate */
		break;
	default:
		fatalx("asext_calc: invalid LSA type");
	}
}

void
spf_tree_clr(struct area *area)
{
	struct lsa_tree	*tree = &area->lsa_tree;
	struct vertex	*v;

	RB_FOREACH(v, lsa_tree, tree) {
		v->cost = LS_INFINITY;
		vertex_nexthop_clear(v);
	}
}

void
calc_nexthop(struct vertex *dst, struct vertex *parent,
    struct area *area, struct lsa_rtr_link *rtr_link)
{
	struct v_nexthop	*vn;
	struct iface		*iface;
	int			 i;

	/* case 1 */
	if (parent == spf_root) {
		switch (dst->type) {
		case LSA_TYPE_ROUTER:
			if (rtr_link->type != LINK_TYPE_POINTTOPOINT)
				fatalx("inconsistent SPF tree");
			LIST_FOREACH(iface, &area->iface_list, entry) {
				if (rtr_link->data == iface->addr.s_addr) {
					vertex_nexthop_add(dst, parent,
					    iface->dst.s_addr);
					return;
				}
			}
			fatalx("no interface found for interface");
		case LSA_TYPE_NETWORK:
			switch (rtr_link->type) {
			case LINK_TYPE_POINTTOPOINT:
			case LINK_TYPE_STUB_NET:
				/* ignore */
				break;
			case LINK_TYPE_TRANSIT_NET:
				if ((htonl(dst->ls_id) &
				    dst->lsa->data.net.mask) ==
				    (rtr_link->data &
				    dst->lsa->data.net.mask)) {
					vertex_nexthop_add(dst, parent,
					    rtr_link->data);
				}
				break;
			default:
				fatalx("calc_nexthop: invalid link "
				    "type");
			}
			return;
		default:
			fatalx("calc_nexthop: invalid dst type");
		}
		return;
	}

	/* case 2 */
	if (parent->type == LSA_TYPE_NETWORK && dst->type == LSA_TYPE_ROUTER) {
		TAILQ_FOREACH(vn, &parent->nexthop, entry) {
			if (vn->prev == spf_root) {
				for (i = 0; i < lsa_num_links(dst); i++) {
					rtr_link = get_rtr_link(dst, i);
					if ((rtr_link->type ==
					    LINK_TYPE_TRANSIT_NET) &&
					    (rtr_link->data &
					    parent->lsa->data.net.mask) ==
					    (htonl(parent->ls_id) &
					    parent->lsa->data.net.mask))
						vertex_nexthop_add(dst, parent,
						    rtr_link->data);
				}
			} else {
				vertex_nexthop_add(dst, parent,
				    vn->nexthop.s_addr);
			}
		}
		return;
	}

	/* case 3 */
	TAILQ_FOREACH(vn, &parent->nexthop, entry)
		vertex_nexthop_add(dst, parent, vn->nexthop.s_addr);
}

/* candidate list */
void
cand_list_init(void)
{
	TAILQ_INIT(&cand_list);
}

void
cand_list_add(struct vertex *v)
{
	struct vertex	*c = NULL;

	TAILQ_FOREACH(c, &cand_list, cand) {
		if (c->cost > v->cost) {
			TAILQ_INSERT_BEFORE(c, v, cand);
			return;
		} else if (c->cost == v->cost && c->type == LSA_TYPE_ROUTER &&
		    v->type == LSA_TYPE_NETWORK) {
			TAILQ_INSERT_BEFORE(c, v, cand);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&cand_list, v, cand);
}

struct vertex *
cand_list_pop(void)
{
	struct vertex	*c;

	if ((c = TAILQ_FIRST(&cand_list)) != NULL) {
		TAILQ_REMOVE(&cand_list, c, cand);
	}

	return (c);
}

int
cand_list_present(struct vertex *v)
{
	struct vertex	*c;

	TAILQ_FOREACH(c, &cand_list, cand) {
		if (c == v)
			return (1);
	}

	return (0);
}

void
cand_list_clr(void)
{
	struct vertex	*c;

	while ((c = TAILQ_FIRST(&cand_list)) != NULL) {
		TAILQ_REMOVE(&cand_list, c, cand);
	}
}

/* timers */
/* ARGSUSED */
void
spf_timer(int fd, short event, void *arg)
{
	struct vertex		*v;
	struct ospfd_conf	*conf = arg;
	struct area		*area;
	struct rt_node		*r;

	switch (conf->spf_state) {
	case SPF_IDLE:
		fatalx("spf_timer: invalid state IDLE");
	case SPF_HOLDQUEUE:
		conf->spf_state = SPF_DELAY;
		/* FALLTHROUGH */
	case SPF_DELAY:
		LIST_FOREACH(area, &conf->area_list, entry) {
			if (area->dirty) {
				/* invalidate RIB entries of this area */
				rt_invalidate(area);

				/* calculate SPF tree */
				spf_calc(area);

				/* calculate route table */
				RB_FOREACH(v, lsa_tree, &area->lsa_tree) {
					rt_calc(v, area, conf);
				}

				area->dirty = 0;
			}
		}

		/* calculate as-external routes, first invalidate them */
		rt_invalidate(NULL);
		RB_FOREACH(v, lsa_tree, &asext_tree) {
			asext_calc(v);
		}

		RB_FOREACH(r, rt_tree, &rt) {
			LIST_FOREACH(area, &conf->area_list, entry)
				rde_summary_update(r, area);

			if (r->d_type != DT_NET)
				continue;

			if (r->invalid)
				rde_send_delete_kroute(r);
			else
				rde_send_change_kroute(r);
		}

		LIST_FOREACH(area, &conf->area_list, entry) {
			lsa_generate_stub_sums(area);
			lsa_remove_invalid_sums(area);
		}

		start_spf_holdtimer(conf);
		break;
	case SPF_HOLD:
		conf->spf_state = SPF_IDLE;
		break;
	default:
		fatalx("spf_timer: unknown state");
	}
}

void
start_spf_timer(void)
{
	struct timeval	tv;

	switch (rdeconf->spf_state) {
	case SPF_IDLE:
		timerclear(&tv);
		tv.tv_sec = rdeconf->spf_delay / 1000;
		tv.tv_usec = (rdeconf->spf_delay % 1000) * 1000;
		rdeconf->spf_state = SPF_DELAY;
		if (evtimer_add(&rdeconf->ev, &tv) == -1)
			fatal("start_spf_timer");
		break;
	case SPF_DELAY:
		/* ignore */
		break;
	case SPF_HOLD:
		rdeconf->spf_state = SPF_HOLDQUEUE;
		break;
	case SPF_HOLDQUEUE:
		/* ignore */
		break;
	default:
		fatalx("start_spf_timer: invalid spf_state");
	}
}

void
stop_spf_timer(struct ospfd_conf *conf)
{
	if (evtimer_del(&conf->ev) == -1)
		fatal("stop_spf_timer");
}

void
start_spf_holdtimer(struct ospfd_conf *conf)
{
	struct timeval	tv;

	switch (conf->spf_state) {
	case SPF_DELAY:
		timerclear(&tv);
		tv.tv_sec = rdeconf->spf_hold_time / 1000;
		tv.tv_usec = (rdeconf->spf_hold_time % 1000) * 1000;
		conf->spf_state = SPF_HOLD;
		if (evtimer_add(&conf->ev, &tv) == -1)
			fatal("start_spf_holdtimer");
		break;
	case SPF_IDLE:
	case SPF_HOLD:
	case SPF_HOLDQUEUE:
		fatalx("start_spf_holdtimer: invalid state");
	default:
		fatalx("start_spf_holdtimer: unknown state");
	}
}

/* route table */
void
rt_init(void)
{
	RB_INIT(&rt);
}

int
rt_compare(struct rt_node *a, struct rt_node *b)
{
	if (ntohl(a->prefix.s_addr) < ntohl(b->prefix.s_addr))
		return (-1);
	if (ntohl(a->prefix.s_addr) > ntohl(b->prefix.s_addr))
		return (1);
	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);
	if (a->d_type > b->d_type)
		return (-1);
	if (a->d_type < b->d_type)
		return (1);
	return (0);
}

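/*
 * rt_find() is an exact match on (prefix, prefixlen, d_type);
 * longest-prefix matching is done by rt_lookup() further down.
 */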
struct rt_node *
rt_find(in_addr_t prefix, u_int8_t prefixlen, enum dst_type d_type)
{
	struct rt_node	s;

	s.prefix.s_addr = prefix;
	s.prefixlen = prefixlen;
	s.d_type = d_type;

	return (RB_FIND(rt_tree, &rt, &s));
}

int
rt_insert(struct rt_node *r)
{
	if (RB_INSERT(rt_tree, &rt, r) != NULL) {
		log_warnx("rt_insert failed for %s/%u",
		    inet_ntoa(r->prefix), r->prefixlen);
		free(r);
		return (-1);
	}

	return (0);
}

int
rt_remove(struct rt_node *r)
{
	if (RB_REMOVE(rt_tree, &rt, r) == NULL) {
		log_warnx("rt_remove failed for %s/%u",
		    inet_ntoa(r->prefix), r->prefixlen);
		return (-1);
	}

	rt_nexthop_clear(r);
	free(r);
	return (0);
}

void
rt_invalidate(struct area *area)
{
	struct rt_node		*r, *nr;
	struct rt_nexthop	*rn, *nrn;

	for (r = RB_MIN(rt_tree, &rt); r != NULL; r = nr) {
		nr = RB_NEXT(rt_tree, &rt, r);
		if (area == NULL) {
			/* look only at as_ext routes */
			if (r->p_type != PT_TYPE1_EXT &&
			    r->p_type != PT_TYPE2_EXT)
				continue;
		} else {
			/* ignore all as_ext routes */
			if (r->p_type == PT_TYPE1_EXT ||
			    r->p_type == PT_TYPE2_EXT)
				continue;

			/* look only at routes matching the area */
			if (r->area.s_addr != area->id.s_addr)
				continue;
		}
		r->invalid = 1;
		for (rn = TAILQ_FIRST(&r->nexthop); rn != NULL; rn = nrn) {
			nrn = TAILQ_NEXT(rn, entry);
			if (rn->invalid) {
				TAILQ_REMOVE(&r->nexthop, rn, entry);
				free(rn);
			} else
				rn->invalid = 1;
		}
		if (TAILQ_EMPTY(&r->nexthop))
			rt_remove(r);
	}
}

void
rt_nexthop_clear(struct rt_node *r)
{
	struct rt_nexthop	*rn;

	while ((rn = TAILQ_FIRST(&r->nexthop)) != NULL) {
		TAILQ_REMOVE(&r->nexthop, rn, entry);
		free(rn);
	}
}

void
rt_nexthop_add(struct rt_node *r, struct v_nexthead *vnh, u_int8_t type,
    struct in_addr adv_rtr)
{
	struct v_nexthop	*vn;
	struct rt_nexthop	*rn;
	struct timespec		 now;

	TAILQ_FOREACH(vn, vnh, entry) {
		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->nexthop.s_addr != vn->nexthop.s_addr)
				continue;

			rn->adv_rtr.s_addr = adv_rtr.s_addr;
			rn->connected = (type == LSA_TYPE_NETWORK &&
			    vn->prev == spf_root);
			rn->invalid = 0;

			r->invalid = 0;
			break;
		}
		if (rn)
			continue;

		if ((rn = calloc(1, sizeof(struct rt_nexthop))) == NULL)
			fatal("rt_nexthop_add");

		clock_gettime(CLOCK_MONOTONIC, &now);
		rn->nexthop.s_addr = vn->nexthop.s_addr;
		rn->adv_rtr.s_addr = adv_rtr.s_addr;
		rn->uptime = now.tv_sec;
		rn->connected = (type == LSA_TYPE_NETWORK &&
		    vn->prev == spf_root);
		rn->invalid = 0;

		r->invalid = 0;
		TAILQ_INSERT_TAIL(&r->nexthop, rn, entry);
	}
}

void
rt_clear(void)
{
	struct rt_node	*r;

	while ((r = RB_MIN(rt_tree, &rt)) != NULL)
		rt_remove(r);
}

void
rt_dump(struct in_addr area, pid_t pid, u_int8_t r_type)
{
	static struct ctl_rt	 rtctl;
	struct timespec		 now;
	struct rt_node		*r;
	struct rt_nexthop	*rn;

	clock_gettime(CLOCK_MONOTONIC, &now);

	RB_FOREACH(r, rt_tree, &rt) {
		if (r->invalid)
			continue;

		if (r->area.s_addr != area.s_addr)
			continue;

		switch (r_type) {
		case RIB_RTR:
			if (r->d_type != DT_RTR)
				continue;
			break;
		case RIB_NET:
			if (r->d_type != DT_NET)
				continue;
			if (r->p_type == PT_TYPE1_EXT ||
			    r->p_type == PT_TYPE2_EXT)
				continue;
			break;
		case RIB_EXT:
			if (r->p_type != PT_TYPE1_EXT &&
			    r->p_type != PT_TYPE2_EXT)
				continue;
			break;
		default:
			fatalx("rt_dump: invalid RIB type");
		}

		TAILQ_FOREACH(rn, &r->nexthop, entry) {
			if (rn->invalid)
				continue;

			rtctl.prefix.s_addr = r->prefix.s_addr;
			rtctl.nexthop.s_addr = rn->nexthop.s_addr;
			rtctl.area.s_addr = r->area.s_addr;
			rtctl.adv_rtr.s_addr = rn->adv_rtr.s_addr;
			rtctl.cost = r->cost;
			rtctl.cost2 = r->cost2;
			rtctl.p_type = r->p_type;
			rtctl.d_type = r->d_type;
			rtctl.flags = r->flags;
			rtctl.prefixlen = r->prefixlen;
			rtctl.uptime = now.tv_sec - rn->uptime;

			rde_imsg_compose_ospfe(IMSG_CTL_SHOW_RIB, 0, pid,
			    &rtctl, sizeof(rtctl));
		}
	}
}

void
rt_update(struct in_addr prefix, u_int8_t prefixlen, struct v_nexthead *vnh,
    u_int8_t v_type, u_int32_t cost, u_int32_t cost2, struct in_addr area,
    struct in_addr adv_rtr, enum path_type p_type, enum dst_type d_type,
    u_int8_t flags, u_int32_t tag)
{
	struct rt_node		*rte;
	struct rt_nexthop	*rn;
	int			 better = 0, equal = 0;

	if (vnh == NULL || TAILQ_EMPTY(vnh))	/* XXX remove */
		fatalx("rt_update: invalid nexthop");

	if ((rte = rt_find(prefix.s_addr, prefixlen, d_type)) == NULL) {
		if ((rte = calloc(1, sizeof(struct rt_node))) == NULL)
			fatal("rt_update");

		TAILQ_INIT(&rte->nexthop);
		rte->prefix.s_addr = prefix.s_addr;
		rte->prefixlen = prefixlen;
		rte->cost = cost;
		rte->cost2 = cost2;
		rte->area = area;
		rte->p_type = p_type;
		rte->d_type = d_type;
		rte->flags = flags;
		rte->ext_tag = tag;

		rt_nexthop_add(rte, vnh, v_type, adv_rtr);

		rt_insert(rte);
	} else {
		/* order:
		 * 1. intra-area
		 * 2. inter-area
		 * 3. type 1 as ext
		 * 4. type 2 as ext
		 */
		if (rte->invalid)	/* everything is better than invalid */
			better = 1;
		else if (p_type < rte->p_type)
			better = 1;
		else if (p_type == rte->p_type)
			switch (p_type) {
			case PT_INTRA_AREA:
			case PT_INTER_AREA:
				if (cost < rte->cost)
					better = 1;
				else if (cost == rte->cost &&
				    rte->area.s_addr == area.s_addr)
					equal = 1;
				break;
			case PT_TYPE1_EXT:
				/* XXX rfc1583 compat */
				if (cost < rte->cost)
					better = 1;
				else if (cost == rte->cost)
					equal = 1;
				break;
			case PT_TYPE2_EXT:
				if (cost2 < rte->cost2)
					better = 1;
				/* XXX rfc1583 compat */
				else if (cost2 == rte->cost2 &&
				    cost < rte->cost)
					better = 1;
				else if (cost2 == rte->cost2 &&
				    cost == rte->cost)
					equal = 1;
				break;
			}

		if (better) {
			TAILQ_FOREACH(rn, &rte->nexthop, entry)
				rn->invalid = 1;

			rte->area = area;
			rte->cost = cost;
			rte->cost2 = cost2;
			rte->p_type = p_type;
			rte->flags = flags;
			rte->ext_tag = tag;
		}

		if (equal || better)
			rt_nexthop_add(rte, vnh, v_type, adv_rtr);
	}
}

struct rt_node *
rt_lookup(enum dst_type type, in_addr_t addr)
{
	struct rt_node	*rn;
	u_int8_t	 i = 32;

	if (type == DT_RTR) {
		rn = rt_find(addr, 32, type);
		if (rn && rn->invalid == 0)
			return (rn);
		return (NULL);
	}

	/* type == DT_NET */
	do {
		if ((rn = rt_find(addr & prefixlen2mask(i), i, type)) &&
		    rn->invalid == 0)
			return (rn);
	} while (i-- != 0);

	return (NULL);
}

/* router LSA links */
struct lsa_rtr_link *
get_rtr_link(struct vertex *v, int idx)
{
	struct lsa_rtr_link	*rtr_link = NULL;
	char			*buf = (char *)v->lsa;
	u_int16_t		 i, off, nlinks;

	if (v->type != LSA_TYPE_ROUTER)
		fatalx("get_rtr_link: invalid LSA type");

	off = sizeof(v->lsa->hdr) + sizeof(struct lsa_rtr);

	/* nlinks validated earlier by lsa_check() */
	nlinks = lsa_num_links(v);
	for (i = 0; i < nlinks; i++) {
		rtr_link = (struct lsa_rtr_link *)(buf + off);
		if (i == idx)
			return (rtr_link);

		off += sizeof(struct lsa_rtr_link) +
		    rtr_link->num_tos * sizeof(u_int32_t);
	}

	fatalx("get_rtr_link: index not found");
}

/* network LSA links */
struct lsa_net_link *
get_net_link(struct vertex *v, int idx)
{
	struct lsa_net_link	*net_link = NULL;
	char			*buf = (char *)v->lsa;
	u_int16_t		 i, off, nlinks;

	if (v->type != LSA_TYPE_NETWORK)
		fatalx("get_net_link: invalid LSA type");

	off = sizeof(v->lsa->hdr) + sizeof(u_int32_t);

	/* nlinks validated earlier by lsa_check() */
	nlinks = lsa_num_links(v);
	for (i = 0; i < nlinks; i++) {
		net_link = (struct lsa_net_link *)(buf + off);
		if (i == idx)
			return (net_link);

		off += sizeof(struct lsa_net_link);
	}

	fatalx("get_net_link: index not found");
}

/* misc */
int
linked(struct vertex *w, struct vertex *v)
{
	struct lsa_rtr_link	*rtr_link = NULL;
	struct lsa_net_link	*net_link = NULL;
	int			 i;

	switch (w->type) {
	case LSA_TYPE_ROUTER:
		for (i = 0; i < lsa_num_links(w); i++) {
			rtr_link = get_rtr_link(w, i);
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				if (rtr_link->type == LINK_TYPE_POINTTOPOINT &&
				    rtr_link->id == htonl(v->ls_id))
					return (1);
				break;
			case LSA_TYPE_NETWORK:
				if (rtr_link->id == htonl(v->ls_id))
					return (1);
				break;
			default:
				fatalx("linked: invalid type");
			}
		}
		return (0);
	case LSA_TYPE_NETWORK:
		for (i = 0; i < lsa_num_links(w); i++) {
			net_link = get_net_link(w, i);
			switch (v->type) {
			case LSA_TYPE_ROUTER:
				if (net_link->att_rtr == htonl(v->ls_id))
					return (1);
				break;
			default:
				fatalx("linked: invalid type");
			}
		}
		return (0);
	default:
		fatalx("linked: invalid LSA type");
	}

	return (0);
}