1 /* $OpenBSD$ */
2
3 /*
4 * Copyright (c) 2019 Claudio Jeker <claudio@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 #include <sys/tree.h>
19 #include <sys/types.h>
20 #include <sys/socket.h>
21 #include <ifaddrs.h>
22 #include <stdlib.h>
23 #include <string.h>
24
25 #include "bgpd.h"
26 #include "session.h"
27 #include "log.h"
28
/*
 * IPv4 kernel route wrapped in an RB tree node.  Only referenced by the
 * compiled-out (#if 0) kroute validation code in this file.
 */
struct kroute_node {
	RB_ENTRY(kroute_node)	entry;	/* tree linkage */
	struct kroute		r;	/* the route itself */
	struct kroute_node	*next;	/* presumably chains routes for the
					 * same prefix — unused in this stub */
};
34
/*
 * IPv6 counterpart of kroute_node; likewise only used by the disabled
 * kernel-route validation path.
 */
struct kroute6_node {
	RB_ENTRY(kroute6_node)	entry;	/* tree linkage */
	struct kroute6		r;	/* the route itself */
	struct kroute6_node	*next;	/* presumably chains routes for the
					 * same prefix — unused in this stub */
};
40
/*
 * A nexthop the RDE asked us to track, keyed by address.  kroute points
 * at the resolving kroute{,6}_node in the full implementation; in this
 * backend it stays NULL (see knexthop_send_update()).
 */
struct knexthop_node {
	RB_ENTRY(knexthop_node)	entry;		/* tree linkage */
	struct bgpd_addr	nexthop;	/* lookup key */
	void			*kroute;	/* unused here, always NULL */
};
46
/*
 * A prefix announced via "network" statements, deduplicated in an RB
 * tree keyed on (aid, prefixlen, prefix, rd).
 */
struct kredist_node {
	RB_ENTRY(kredist_node)	entry;		/* tree linkage */
	struct bgpd_addr	prefix;		/* announced prefix */
	u_int64_t		rd;		/* route distinguisher */
	u_int8_t		prefixlen;
	u_int8_t		dynamic;	/* non-zero if dynamically added;
						 * static (0) has preference */
};
54
/* The single kernel table supported by this backend (rdomain 0 only). */
struct ktable	krt;
const u_int	krt_size = 1;

struct ktable	*ktable_get(u_int);

static u_int8_t	mask2prefixlen(in_addr_t);
static u_int8_t	mask2prefixlen6(struct sockaddr_in6 *);
62
63 static inline int
knexthop_compare(struct knexthop_node * a,struct knexthop_node * b)64 knexthop_compare(struct knexthop_node *a, struct knexthop_node *b)
65 {
66 int i;
67
68 if (a->nexthop.aid != b->nexthop.aid)
69 return (b->nexthop.aid - a->nexthop.aid);
70
71 switch (a->nexthop.aid) {
72 case AID_INET:
73 if (ntohl(a->nexthop.v4.s_addr) < ntohl(b->nexthop.v4.s_addr))
74 return (-1);
75 if (ntohl(a->nexthop.v4.s_addr) > ntohl(b->nexthop.v4.s_addr))
76 return (1);
77 break;
78 case AID_INET6:
79 for (i = 0; i < 16; i++) {
80 if (a->nexthop.v6.s6_addr[i] < b->nexthop.v6.s6_addr[i])
81 return (-1);
82 if (a->nexthop.v6.s6_addr[i] > b->nexthop.v6.s6_addr[i])
83 return (1);
84 }
85 break;
86 default:
87 fatalx("%s: unknown AF", __func__);
88 }
89
90 return (0);
91 }
92
93 static inline int
kredist_compare(struct kredist_node * a,struct kredist_node * b)94 kredist_compare(struct kredist_node *a, struct kredist_node *b)
95 {
96 int i;
97
98 if (a->prefix.aid != b->prefix.aid)
99 return (b->prefix.aid - a->prefix.aid);
100
101 if (a->prefixlen < b->prefixlen)
102 return (-1);
103 if (a->prefixlen > b->prefixlen)
104 return (1);
105
106 switch (a->prefix.aid) {
107 case AID_INET:
108 if (ntohl(a->prefix.v4.s_addr) < ntohl(b->prefix.v4.s_addr))
109 return (-1);
110 if (ntohl(a->prefix.v4.s_addr) > ntohl(b->prefix.v4.s_addr))
111 return (1);
112 break;
113 case AID_INET6:
114 for (i = 0; i < 16; i++) {
115 if (a->prefix.v6.s6_addr[i] < b->prefix.v6.s6_addr[i])
116 return (-1);
117 if (a->prefix.v6.s6_addr[i] > b->prefix.v6.s6_addr[i])
118 return (1);
119 }
120 break;
121 default:
122 fatalx("%s: unknown AF", __func__);
123 }
124
125 if (a->rd < b->rd)
126 return (-1);
127 if (a->rd > b->rd)
128 return (1);
129
130 return (0);
131 }
132
RB_PROTOTYPE(knexthop_tree, knexthop_node, entry, knexthop_compare)
RB_GENERATE(knexthop_tree, knexthop_node, entry, knexthop_compare)

RB_PROTOTYPE(kredist_tree, kredist_node, entry, kredist_compare)
RB_GENERATE(kredist_tree, kredist_node, entry, kredist_compare)

/* Map a ktable to the nexthop tree of its nexthop table. */
#define KT2KNT(x)	(&(ktable_get((x)->nhtableid)->knt))

void	knexthop_send_update(struct knexthop_node *);
142
143 static struct knexthop_node *
knexthop_find(struct ktable * kt,struct bgpd_addr * addr)144 knexthop_find(struct ktable *kt, struct bgpd_addr *addr)
145 {
146 struct knexthop_node s;
147
148 bzero(&s, sizeof(s));
149 memcpy(&s.nexthop, addr, sizeof(s.nexthop));
150
151 return (RB_FIND(knexthop_tree, KT2KNT(kt), &s));
152 }
153
154 static int
knexthop_insert(struct ktable * kt,struct knexthop_node * kn)155 knexthop_insert(struct ktable *kt, struct knexthop_node *kn)
156 {
157 if (RB_INSERT(knexthop_tree, KT2KNT(kt), kn) != NULL) {
158 log_warnx("%s: failed for %s", __func__,
159 log_addr(&kn->nexthop));
160 free(kn);
161 return (-1);
162 }
163
164 knexthop_send_update(kn);
165
166 return (0);
167 }
168
169 static int
knexthop_remove(struct ktable * kt,struct knexthop_node * kn)170 knexthop_remove(struct ktable *kt, struct knexthop_node *kn)
171 {
172 if (RB_REMOVE(knexthop_tree, KT2KNT(kt), kn) == NULL) {
173 log_warnx("%s: failed for %s", __func__,
174 log_addr(&kn->nexthop));
175 return (-1);
176 }
177
178 free(kn);
179 return (0);
180 }
181
182 static void
knexthop_clear(struct ktable * kt)183 knexthop_clear(struct ktable *kt)
184 {
185 struct knexthop_node *kn;
186
187 while ((kn = RB_MIN(knexthop_tree, KT2KNT(kt))) != NULL)
188 knexthop_remove(kt, kn);
189 }
190
191 int
kr_nexthop_add(u_int rtableid,struct bgpd_addr * addr,struct bgpd_config * conf)192 kr_nexthop_add(u_int rtableid, struct bgpd_addr *addr, struct bgpd_config *conf)
193 {
194 struct ktable *kt;
195 struct knexthop_node *h;
196
197 if (rtableid == 0)
198 rtableid = conf->default_tableid;
199
200 if ((kt = ktable_get(rtableid)) == NULL) {
201 log_warnx("%s: non-existent rtableid %d", __func__, rtableid);
202 return (0);
203 }
204 if ((h = knexthop_find(kt, addr)) != NULL) {
205 /* should not happen... this is actually an error path */
206 knexthop_send_update(h);
207 } else {
208 if ((h = calloc(1, sizeof(struct knexthop_node))) == NULL) {
209 log_warn("%s", __func__);
210 return (-1);
211 }
212 memcpy(&h->nexthop, addr, sizeof(h->nexthop));
213
214 if (knexthop_insert(kt, h) == -1)
215 return (-1);
216 }
217
218 return (0);
219 }
220
221 void
kr_nexthop_delete(u_int rtableid,struct bgpd_addr * addr,struct bgpd_config * conf)222 kr_nexthop_delete(u_int rtableid, struct bgpd_addr *addr,
223 struct bgpd_config *conf)
224 {
225 struct ktable *kt;
226 struct knexthop_node *kn;
227
228 if (rtableid == 0)
229 rtableid = conf->default_tableid;
230
231 if ((kt = ktable_get(rtableid)) == NULL) {
232 log_warnx("%s: non-existent rtableid %d", __func__,
233 rtableid);
234 return;
235 }
236 if ((kn = knexthop_find(kt, addr)) == NULL)
237 return;
238
239 knexthop_remove(kt, kn);
240 }
241
/*
 * Report the state of a nexthop to the RDE.  The kernel-route based
 * validation is compiled out (#if 0); in this backend every nexthop is
 * reported as valid and is additionally flagged as connected when it
 * falls within the prefix of a local interface address obtained via
 * getifaddrs(3).
 */
void
knexthop_send_update(struct knexthop_node *kn)
{
	struct kroute_nexthop	 n;
#if 0
	struct kroute_node	*kr;
	struct kroute6_node	*kr6;
#endif
	struct ifaddrs		*ifap, *ifa;

	bzero(&n, sizeof(n));
	memcpy(&n.nexthop, &kn->nexthop, sizeof(n.nexthop));

#if 0
	/* Disabled: resolve the nexthop via the cached kernel route. */
	if (kn->kroute == NULL) {
		n.valid = 0;	/* NH is not valid */
		send_nexthop_update(&n);
		return;
	}

	switch (kn->nexthop.aid) {
	case AID_INET:
		kr = kn->kroute;
		n.valid = kroute_validate(&kr->r);
		n.connected = kr->r.flags & F_CONNECTED;
		if (kr->r.nexthop.s_addr != 0) {
			n.gateway.aid = AID_INET;
			n.gateway.v4.s_addr = kr->r.nexthop.s_addr;
		}
		if (n.connected) {
			n.net.aid = AID_INET;
			n.net.v4.s_addr = kr->r.prefix.s_addr;
			n.netlen = kr->r.prefixlen;
		}
		break;
	case AID_INET6:
		kr6 = kn->kroute;
		n.valid = kroute6_validate(&kr6->r);
		n.connected = kr6->r.flags & F_CONNECTED;
		if (memcmp(&kr6->r.nexthop, &in6addr_any,
		    sizeof(struct in6_addr)) != 0) {
			n.gateway.aid = AID_INET6;
			memcpy(&n.gateway.v6, &kr6->r.nexthop,
			    sizeof(struct in6_addr));
		}
		if (n.connected) {
			n.net.aid = AID_INET6;
			memcpy(&n.net.v6, &kr6->r.prefix,
			    sizeof(struct in6_addr));
			n.netlen = kr6->r.prefixlen;
		}
		break;
	}
#else
	n.valid = 1;	/* NH is always valid */
	memcpy(&n.gateway, &kn->nexthop, sizeof(n.gateway));

	if (getifaddrs(&ifap) == -1)
		fatal("getifaddrs");

	/* scan local addresses for one whose prefix covers the nexthop */
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		struct bgpd_addr addr;
		struct sockaddr_in *m4;
		struct sockaddr_in6 *m6;
		int plen;

		if (ifa->ifa_addr == NULL)
			continue;

		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			m4 = (struct sockaddr_in *)ifa->ifa_netmask;
			if (m4 == NULL)
				plen = 32;	/* no netmask: assume host */
			else
				plen = mask2prefixlen(m4->sin_addr.s_addr);
			break;
		case AF_INET6:
			m6 = (struct sockaddr_in6 *)ifa->ifa_netmask;
			if (m6 == NULL)
				plen = 128;	/* no netmask: assume host */
			else
				plen = mask2prefixlen6(m6);
			break;
		default:
			continue;
		}
		sa2addr(ifa->ifa_addr, &addr, NULL);
		if (prefix_compare(&n.nexthop, &addr, plen) != 0)
			continue;

		/* nexthop is directly connected via this address */
		n.connected = F_CONNECTED;
		n.gateway = addr;
		n.net = addr;
		n.netlen = plen;
		break;
	}

	freeifaddrs(ifap);
#endif
	send_nexthop_update(&n);
}
344
345 int
kr_init(int * fd)346 kr_init(int *fd)
347 {
348 struct ktable *kt = &krt;;
349
350 /* initialize structure ... */
351 strlcpy(kt->descr, "rdomain_0", sizeof(kt->descr));
352 RB_INIT(&kt->krt);
353 RB_INIT(&kt->krt6);
354 RB_INIT(&kt->knt);
355 TAILQ_INIT(&kt->krn);
356 kt->fib_conf = kt->fib_sync = 0;
357 kt->rtableid = 0;
358 kt->nhtableid = 0;
359
360 /* XXX need to return an FD that can be polled */
361 *fd = -1;
362 return (0);
363 }
364
/* Tear down: drop all tracked nexthops; no kernel state to undo. */
void
kr_shutdown(u_int8_t fib_prio, u_int rdomain)
{
	knexthop_clear(&krt);
}
370
/* FIB coupling is a no-op: this backend never touches a kernel FIB. */
void
kr_fib_couple(u_int rtableid, u_int8_t fib_prio)
{
}

void
kr_fib_couple_all(u_int8_t fib_prio)
{
}

/* FIB decoupling is likewise a no-op. */
void
kr_fib_decouple(u_int rtableid, u_int8_t fib_prio)
{
}

void
kr_fib_decouple_all(u_int8_t fib_prio)
{
}

/* No kernel routes are installed, so there is no priority to update. */
void
kr_fib_update_prio_all(u_int8_t fib_prio)
{
}

/* No routing socket to read; report success with nothing dispatched. */
int
kr_dispatch_msg(u_int rdomain)
{
	return (0);
}

/* Route add/change requests are accepted but not installed anywhere. */
int
kr_change(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
{
	return (0);
}

/* Route delete requests are likewise accepted and ignored. */
int
kr_delete(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
{
	return (0);
}
413
414 static int
kr_net_redist_add(struct ktable * kt,struct network_config * net,struct filter_set_head * attr,int dynamic)415 kr_net_redist_add(struct ktable *kt, struct network_config *net,
416 struct filter_set_head *attr, int dynamic)
417 {
418 struct kredist_node *r, *xr;
419
420 if ((r = calloc(1, sizeof(*r))) == NULL)
421 fatal("%s", __func__);
422 r->prefix = net->prefix;
423 r->prefixlen = net->prefixlen;
424 r->rd = net->rd;
425 r->dynamic = dynamic;
426
427 xr = RB_INSERT(kredist_tree, &kt->kredist, r);
428 if (xr != NULL) {
429 free(r);
430
431 if (dynamic != xr->dynamic && dynamic) {
432 /*
433 * ignore update a non-dynamic announcement is
434 * already present which has preference.
435 */
436 return 0;
437 }
438 /*
439 * only equal or non-dynamic announcement ends up here.
440 * In both cases reset the dynamic flag (nop for equal) and
441 * redistribute.
442 */
443 xr->dynamic = dynamic;
444 }
445
446 if (send_network(IMSG_NETWORK_ADD, net, attr) == -1)
447 log_warnx("%s: faild to send network update", __func__);
448 return 1;
449 }
450
451 static void
kr_net_redist_del(struct ktable * kt,struct network_config * net,int dynamic)452 kr_net_redist_del(struct ktable *kt, struct network_config *net, int dynamic)
453 {
454 struct kredist_node *r, node;
455
456 bzero(&node, sizeof(node));
457 node.prefix = net->prefix;
458 node.prefixlen = net->prefixlen;
459 node.rd = net->rd;
460
461 r = RB_FIND(kredist_tree, &kt->kredist, &node);
462 if (r == NULL || dynamic != r->dynamic)
463 return;
464
465 if (RB_REMOVE(kredist_tree, &kt->kredist, r) == NULL) {
466 log_warnx("%s: failed to remove network %s/%u", __func__,
467 log_addr(&node.prefix), node.prefixlen);
468 return;
469 }
470 free(r);
471
472 if (send_network(IMSG_NETWORK_REMOVE, net, NULL) == -1)
473 log_warnx("%s: faild to send network removal", __func__);
474 }
475
476 static struct network *
kr_net_find(struct ktable * kt,struct network * n)477 kr_net_find(struct ktable *kt, struct network *n)
478 {
479 struct network *xn;
480
481 TAILQ_FOREACH(xn, &kt->krn, entry) {
482 if (n->net.type != xn->net.type ||
483 n->net.prefixlen != xn->net.prefixlen ||
484 n->net.rd != xn->net.rd)
485 continue;
486 if (memcmp(&n->net.prefix, &xn->net.prefix,
487 sizeof(n->net.prefix)) == 0)
488 return (xn);
489 }
490 return (NULL);
491 }
492
/* Free a network statement together with its attached filter set. */
static void
kr_net_delete(struct network *n)
{
	filterset_free(&n->net.attrset);
	free(n);
}
499
500 void
kr_net_reload(u_int rtableid,u_int64_t rd,struct network_head * nh)501 kr_net_reload(u_int rtableid, u_int64_t rd, struct network_head *nh)
502 {
503 struct network *n, *xn;
504 struct ktable *kt;
505
506 if ((kt = ktable_get(rtableid)) == NULL)
507 fatalx("%s: non-existent rtableid %d", __func__, rtableid);
508
509 while ((n = TAILQ_FIRST(nh)) != NULL) {
510 TAILQ_REMOVE(nh, n, entry);
511
512 if (n->net.type != NETWORK_DEFAULT) {
513 log_warnx("dynamic network statements unimplemened, "
514 "network ignored");
515 kr_net_delete(n);
516 continue;
517 }
518
519 n->net.old = 0;
520 n->net.rd = rd;
521 xn = kr_net_find(kt, n);
522 if (xn) {
523 xn->net.old = 0;
524 filterset_free(&xn->net.attrset);
525 filterset_move(&n->net.attrset, &xn->net.attrset);
526 kr_net_delete(n);
527 } else {
528 TAILQ_INSERT_TAIL(&kt->krn, n, entry);
529 }
530 }
531 }
532
533 int
kr_reload(void)534 kr_reload(void)
535 {
536 struct ktable *kt;
537 struct network *n;
538 u_int rid;
539
540 for (rid = 0; rid < krt_size; rid++) {
541 if ((kt = ktable_get(rid)) == NULL)
542 continue;
543
544 TAILQ_FOREACH(n, &kt->krn, entry)
545 if (n->net.type == NETWORK_DEFAULT) {
546 kr_net_redist_add(kt, &n->net,
547 &n->net.attrset, 0);
548 } else
549 fatalx("%s: dynamic networks not implemented",
550 __func__);
551 }
552
553 return (0);
554 }
555
/* Flushing installed routes is a no-op: none are ever installed. */
int
kr_flush(u_int rtableid)
{
	/* nothing to flush for now */
	return (0);
}
562
/*
 * Answer a "show" control request from bgpctl.  Only nexthop and
 * FIB-table listings carry real data in this backend; any other
 * request type is answered with CTL_RES_DENIED.  Every successful
 * request is terminated with IMSG_CTL_END.
 */
void
kr_show_route(struct imsg *imsg)
{
	struct ctl_show_nexthop	 snh;
	struct ktable		*kt;
	struct knexthop_node	*h;
	int			 code;

	switch (imsg->hdr.type) {
	case IMSG_CTL_SHOW_NEXTHOP:
		kt = ktable_get(imsg->hdr.peerid);
		if (kt == NULL) {
			log_warnx("%s: table %u does not exist", __func__,
			    imsg->hdr.peerid);
			break;
		}
		/* one reply message per tracked nexthop */
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt)) {
			bzero(&snh, sizeof(snh));
			memcpy(&snh.addr, &h->nexthop, sizeof(snh.addr));
#if 0
			/* Disabled: kernel-route details for the nexthop. */
			if (h->kroute != NULL) {
				switch (h->nexthop.aid) {
				case AID_INET:
					kr = h->kroute;
					snh.valid = kroute_validate(&kr->r);
					snh.krvalid = 1;
					memcpy(&snh.kr.kr4, &kr->r,
					    sizeof(snh.kr.kr4));
					ifindex = kr->r.ifindex;
					break;
				case AID_INET6:
					kr6 = h->kroute;
					snh.valid = kroute6_validate(&kr6->r);
					snh.krvalid = 1;
					memcpy(&snh.kr.kr6, &kr6->r,
					    sizeof(snh.kr.kr6));
					ifindex = kr6->r.ifindex;
					break;
				}
				if ((kif = kif_find(ifindex)) != NULL)
					memcpy(&snh.iface,
					    kr_show_interface(&kif->k),
					    sizeof(snh.iface));
			}
#else
			/* no kernel routes: report every nexthop as valid */
			snh.valid = 1;
			snh.krvalid = 1;
#endif
			send_imsg_session(IMSG_CTL_SHOW_NEXTHOP, imsg->hdr.pid,
			    &snh, sizeof(snh));
		}
		break;
	case IMSG_CTL_SHOW_FIB_TABLES:
		{
			struct ktable	ktab;

			ktab = krt;
			/* do not leak internal information */
			RB_INIT(&ktab.krt);
			RB_INIT(&ktab.krt6);
			RB_INIT(&ktab.knt);
			TAILQ_INIT(&ktab.krn);

			send_imsg_session(IMSG_CTL_SHOW_FIB_TABLES,
			    imsg->hdr.pid, &ktab, sizeof(ktab));
		}
		break;
	default:	/* nada */
		code = CTL_RES_DENIED /* XXX */;
		send_imsg_session(IMSG_CTL_RESULT, imsg->hdr.pid,
		    &code, sizeof(code));
		return;
	}

	send_imsg_session(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}
639
/* Interface information requests are a no-op in this backend. */
void
kr_ifinfo(char *ifname)
{
}
644
645 int
ktable_exists(u_int rtableid,u_int * rdomid)646 ktable_exists(u_int rtableid, u_int *rdomid)
647 {
648 if (rtableid == 0) {
649 *rdomid = 0;
650 return (1);
651 }
652 return (0);
653 }
654
655 struct ktable *
ktable_get(u_int rtableid)656 ktable_get(u_int rtableid)
657 {
658 if (rtableid == 0)
659 return &krt;
660 return NULL;
661 }
662
/* Freeing a table is unsupported here; reaching this is a fatal bug. */
static void
ktable_free(u_int rtableid, u_int8_t fib_prio)
{
	fatalx("%s not implemented", __func__);
}
668
669 int
ktable_update(u_int rtableid,char * name,int flags,u_int8_t fib_prio)670 ktable_update(u_int rtableid, char *name, int flags, u_int8_t fib_prio)
671 {
672 struct ktable *kt;
673
674 kt = ktable_get(rtableid);
675 if (kt == NULL) {
676 return (-1);
677 } else {
678 /* fib sync has higher preference then no sync */
679 if (kt->state == RECONF_DELETE) {
680 kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
681 kt->state = RECONF_KEEP;
682 } else if (!kt->fib_conf)
683 kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
684
685 strlcpy(kt->descr, name, sizeof(kt->descr));
686 }
687 return (0);
688 }
689
690 void
ktable_preload(void)691 ktable_preload(void)
692 {
693 struct ktable *kt;
694 struct network *n;
695 u_int i;
696
697 for (i = 0; i < krt_size; i++) {
698 if ((kt = ktable_get(i)) == NULL)
699 continue;
700 kt->state = RECONF_DELETE;
701
702 /* mark all networks as old */
703 TAILQ_FOREACH(n, &kt->krn, entry)
704 n->net.old = 1;
705 }
706 }
707
/*
 * Finish a config reload: walk the tables in reverse order, free those
 * still marked RECONF_DELETE, sync the fib flag for reinitialized ones
 * and withdraw/free every network statement still marked old.
 */
void
ktable_postload(u_int8_t fib_prio)
{
	struct ktable	*kt;
	struct network	*n, *xn;
	u_int		 i;

	for (i = krt_size; i > 0; i--) {
		if ((kt = ktable_get(i - 1)) == NULL)
			continue;
		if (kt->state == RECONF_DELETE) {
			ktable_free(i - 1, fib_prio);
			continue;
		} else if (kt->state == RECONF_REINIT)
			kt->fib_sync = kt->fib_conf;

		/* cleanup old networks */
		TAILQ_FOREACH_SAFE(n, &kt->krn, entry, xn) {
			if (n->net.old) {
				TAILQ_REMOVE(&kt->krn, n, entry);
				if (n->net.type == NETWORK_DEFAULT)
					kr_net_redist_del(kt, &n->net, 0);
				kr_net_delete(n);
			}
		}
	}
}
735
/* MPLS/MPE interfaces are unsupported here; always report failure. */
int
get_mpe_config(const char *name, u_int *rdomain, u_int *label)
{
	return (-1);
}
741
/*
 * Convert an IPv4 netmask (network byte order) to its prefix length
 * by locating the lowest set bit of the host-order mask.  An all-zero
 * mask maps to prefix length 0.  Assumes a contiguous mask.
 */
static u_int8_t
mask2prefixlen(in_addr_t ina)
{
	if (ina == 0)
		return (0);
	return (33 - ffs(ntohl(ina)));
}
750
751 static u_int8_t
mask2prefixlen6(struct sockaddr_in6 * sa_in6)752 mask2prefixlen6(struct sockaddr_in6 *sa_in6)
753 {
754 u_int8_t *ap, *ep;
755 u_int l = 0;
756
757 /*
758 * There is no sin6_len for portability so calculate the end pointer
759 * so that a full IPv6 address fits. On systems without sa_len this
760 * is fine, on OpenBSD this is also correct. On other systems the
761 * assumtion is they behave like OpenBSD or that there is at least
762 * a 0 byte right after the end of the truncated sockaddr_in6.
763 */
764 ap = (u_int8_t *)&sa_in6->sin6_addr;
765 ep = ap + sizeof(struct in6_addr);
766 for (; ap < ep; ap++) {
767 /* this "beauty" is adopted from sbin/route/show.c ... */
768 switch (*ap) {
769 case 0xff:
770 l += 8;
771 break;
772 case 0xfe:
773 l += 7;
774 goto done;
775 case 0xfc:
776 l += 6;
777 goto done;
778 case 0xf8:
779 l += 5;
780 goto done;
781 case 0xf0:
782 l += 4;
783 goto done;
784 case 0xe0:
785 l += 3;
786 goto done;
787 case 0xc0:
788 l += 2;
789 goto done;
790 case 0x80:
791 l += 1;
792 goto done;
793 case 0x00:
794 goto done;
795 default:
796 fatalx("non contiguous inet6 netmask");
797 }
798 }
799
800 done:
801 if (l > sizeof(struct in6_addr) * 8)
802 fatalx("%s: prefixlen %d out of bound", __func__, l);
803 return (l);
804 }
805