1 /* $OpenBSD: kroute.c,v 1.20 2023/03/08 04:43:13 guenther Exp $ */
2
3 /*
4 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/sysctl.h>
24 #include <net/if.h>
25 #include <net/if_dl.h>
26 #include <net/route.h>
27 #include <netinet/in.h>
28
29 #include <arpa/inet.h>
30 #include <errno.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <unistd.h>
34
35 #include "eigrpd.h"
36 #include "log.h"
37
/* Global state of the kernel-routing interaction layer. */
static struct {
	uint32_t		rtseq;		/* next rtsock sequence number */
	pid_t			pid;		/* our pid, to recognize echoed messages */
	int			fib_sync;	/* 1 while coupled to the kernel FIB */
	int			fd;		/* routing socket */
	struct event		ev;		/* read event on the routing socket */
	unsigned int		rdomain;	/* routing domain (table) in use */
} kr_state;
46
/* One nexthop of a route; innermost level of the route tree. */
struct kroute_node {
	TAILQ_ENTRY(kroute_node)	 entry;
	struct kroute_priority		*kprio;		/* back pointer */
	struct kroute			 r;
};

/* All nexthops of a prefix that share one routing priority. */
struct kroute_priority {
	TAILQ_ENTRY(kroute_priority)	 entry;
	struct kroute_prefix		*kp;		/* back pointer */
	uint8_t				 priority;
	TAILQ_HEAD(, kroute_node)	 nexthops;
};

/* One destination prefix; priorities are kept sorted, lowest first. */
struct kroute_prefix {
	RB_ENTRY(kroute_prefix)			 entry;
	int					 af;
	union eigrpd_addr			 prefix;
	uint8_t					 prefixlen;
	TAILQ_HEAD(plist, kroute_priority)	 priorities;
};
RB_HEAD(kroute_tree, kroute_prefix);
RB_PROTOTYPE(kroute_tree, kroute_prefix, entry, kroute_compare)

/* One address configured on an interface. */
struct kif_addr {
	TAILQ_ENTRY(kif_addr)	 entry;
	struct kaddr		 a;
};

/* One kernel interface plus the list of its addresses. */
struct kif_node {
	RB_ENTRY(kif_node)	 entry;
	TAILQ_HEAD(, kif_addr)	 addrs;
	struct kif		 k;
};
RB_HEAD(kif_tree, kif_node);
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
82
/* Forward declarations of the file-local helpers. */
static void		 kr_dispatch_msg(int, short, void *);
static void		 kr_redist_remove(struct kroute *);
static int		 kr_redist_eval(struct kroute *);
static void		 kr_redistribute(struct kroute_prefix *);
static __inline int	 kroute_compare(struct kroute_prefix *,
			    struct kroute_prefix *);
static struct kroute_prefix	*kroute_find_prefix(int, union eigrpd_addr *,
				    uint8_t);
static struct kroute_priority	*kroute_find_prio(struct kroute_prefix *,
				    uint8_t);
static struct kroute_node	*kroute_find_gw(struct kroute_priority *,
				    union eigrpd_addr *);
static struct kroute_node	*kroute_insert(struct kroute *);
static int		 kroute_remove(struct kroute *);
static void		 kroute_clear(void);
static __inline int	 kif_compare(struct kif_node *, struct kif_node *);
static struct kif_node	*kif_find(unsigned short);
static struct kif_node	*kif_insert(unsigned short);
static int		 kif_remove(struct kif_node *);
static struct kif	*kif_update(unsigned short, int, struct if_data *,
			    struct sockaddr_dl *);
static int		 kif_validate(unsigned short);
static void		 protect_lo(void);
static uint8_t		 prefixlen_classful(in_addr_t);
static void		 get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
static void		 if_change(unsigned short, int, struct if_data *,
			    struct sockaddr_dl *);
static void		 if_newaddr(unsigned short, struct sockaddr *,
			    struct sockaddr *, struct sockaddr *);
static void		 if_deladdr(unsigned short, struct sockaddr *,
			    struct sockaddr *, struct sockaddr *);
static void		 if_announce(void *);
static int		 send_rtmsg_v4(int, int, struct kroute *);
static int		 send_rtmsg_v6(int, int, struct kroute *);
static int		 send_rtmsg(int, int, struct kroute *);
static int		 fetchtable(void);
static int		 fetchifs(void);
static int		 dispatch_rtmsg(void);
static int		 rtmsg_process(char *, size_t);
static int		 rtmsg_process_route(struct rt_msghdr *,
			    struct sockaddr *[RTAX_MAX]);

RB_GENERATE(kroute_tree, kroute_prefix, entry, kroute_compare)
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)

/* In-memory copies of the kernel routing table and interface list. */
static struct kroute_tree	 krt = RB_INITIALIZER(&krt);
static struct kif_tree		 kit = RB_INITIALIZER(&kit);
130
/* Load the kernel's interface list; returns 0 on success, -1 on error. */
int
kif_init(void)
{
	return (fetchifs() == -1 ? -1 : 0);
}
139
/*
 * Open and configure the routing socket and load the kernel routing
 * table.  fs enables FIB synchronization, rdomain selects the routing
 * table to operate on.  Returns 0 on success, -1 on failure.
 */
int
kr_init(int fs, unsigned int rdomain)
{
	int		opt = 0, rcvbuf, default_rcvbuf;
	socklen_t	optlen;

	kr_state.fib_sync = fs;
	kr_state.rdomain = rdomain;

	if ((kr_state.fd = socket(AF_ROUTE,
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
		log_warn("%s: socket", __func__);
		return (-1);
	}

	/* not interested in my own messages */
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
	    &opt, sizeof(opt)) == -1)
		log_warn("%s: setsockopt(SO_USELOOPBACK)", __func__);

	/* grow receive buffer, don't wanna miss messages */
	optlen = sizeof(default_rcvbuf);
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		log_warn("%s: getsockopt SOL_SOCKET SO_RCVBUF", __func__);
	else
		/* ask for the maximum, halving until the kernel accepts */
		for (rcvbuf = MAX_RTSOCK_BUF;
		    rcvbuf > default_rcvbuf &&
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
		    rcvbuf /= 2)
			;	/* nothing */

	/* remember our pid so echoed rtsock messages can be recognized */
	kr_state.pid = getpid();
	kr_state.rtseq = 1;

	if (fetchtable() == -1)
		return (-1);

	/* make sure the loopback routes can never be clobbered */
	protect_lo();

	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
	    kr_dispatch_msg, NULL);
	event_add(&kr_state.ev, NULL);

	return (0);
}
187
188 void
kif_redistribute(void)189 kif_redistribute(void)
190 {
191 struct kif_node *kif;
192 struct kif_addr *ka;
193
194 RB_FOREACH(kif, kif_tree, &kit) {
195 main_imsg_compose_eigrpe(IMSG_IFINFO, 0, &kif->k,
196 sizeof(struct kif));
197 TAILQ_FOREACH(ka, &kif->addrs, entry) {
198 main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a,
199 sizeof(ka->a));
200 }
201 }
202 }
203
204 int
kr_change(struct kroute * kr)205 kr_change(struct kroute *kr)
206 {
207 struct kroute_prefix *kp;
208 struct kroute_priority *kprio;
209 struct kroute_node *kn;
210 int action = RTM_ADD;
211
212 kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
213 if (kp == NULL)
214 kn = kroute_insert(kr);
215 else {
216 kprio = kroute_find_prio(kp, kr->priority);
217 if (kprio == NULL)
218 kn = kroute_insert(kr);
219 else {
220 kn = kroute_find_gw(kprio, &kr->nexthop);
221 if (kn == NULL)
222 kn = kroute_insert(kr);
223 else
224 action = RTM_CHANGE;
225 }
226 }
227
228 /* send update */
229 if (send_rtmsg(kr_state.fd, action, kr) == -1)
230 return (-1);
231
232 kn->r.flags |= F_EIGRPD_INSERTED;
233
234 return (0);
235 }
236
237 int
kr_delete(struct kroute * kr)238 kr_delete(struct kroute *kr)
239 {
240 struct kroute_prefix *kp;
241 struct kroute_priority *kprio;
242 struct kroute_node *kn;
243
244 kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
245 if (kp == NULL)
246 return (0);
247 kprio = kroute_find_prio(kp, kr->priority);
248 if (kprio == NULL)
249 return (0);
250 kn = kroute_find_gw(kprio, &kr->nexthop);
251 if (kn == NULL)
252 return (0);
253
254 if (!(kn->r.flags & F_EIGRPD_INSERTED))
255 return (0);
256
257 if (send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r) == -1)
258 return (-1);
259
260 if (kroute_remove(kr) == -1)
261 return (-1);
262
263 return (0);
264 }
265
/* Tear down: withdraw our routes from the kernel, then free all state. */
void
kr_shutdown(void)
{
	kr_fib_decouple();
	kroute_clear();
	kif_clear();
}
273
/*
 * Couple to the kernel FIB: push every route we own (F_EIGRPD_INSERTED)
 * into the kernel routing table.  No-op when already coupled.
 */
void
kr_fib_couple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 1)	/* already coupled */
		return;

	/* set before sending so send_rtmsg() actually writes */
	kr_state.fib_sync = 1;

	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;
				send_rtmsg(kr_state.fd, RTM_ADD, &kn->r);
			}

	log_info("kernel routing table coupled");
}
296
/*
 * Decouple from the kernel FIB: withdraw every route we installed.
 * No-op when already decoupled.
 */
void
kr_fib_decouple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 0)	/* already decoupled */
		return;

	/* deletes are sent while fib_sync is still 1 */
	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;

				send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r);
			}

	kr_state.fib_sync = 0;

	log_info("kernel routing table decoupled");
}
320
321 static void
kr_dispatch_msg(int fd,short event,void * bula)322 kr_dispatch_msg(int fd, short event, void *bula)
323 {
324 if (dispatch_rtmsg() == -1)
325 event_loopexit(NULL);
326 }
327
/*
 * Handle an eigrpctl route-show request: stream matching routes to the
 * eigrpe process, followed by an IMSG_CTL_END marker.
 */
void
kr_show_route(struct imsg *imsg)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kroute		 kr;
	int			 flags;

	if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
		log_warnx("%s: wrong imsg len", __func__);
		return;
	}
	memcpy(&flags, imsg->data, sizeof(flags));
	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				/* flags == 0 means show everything */
				if (flags && !(kn->r.flags & flags))
					continue;

				/* work on a copy; don't taint the tree */
				kr = kn->r;
				if (kr.priority ==
				    eigrpd_conf->fib_priority_external)
					kr.flags |= F_CTL_EXTERNAL;
				main_imsg_compose_eigrpe(IMSG_CTL_KROUTE,
				    imsg->hdr.pid, &kr, sizeof(kr));
			}

	main_imsg_compose_eigrpe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}
358
359 void
kr_ifinfo(char * ifname,pid_t pid)360 kr_ifinfo(char *ifname, pid_t pid)
361 {
362 struct kif_node *kif;
363
364 RB_FOREACH(kif, kif_tree, &kit)
365 if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
366 main_imsg_compose_eigrpe(IMSG_CTL_IFINFO,
367 pid, &kif->k, sizeof(kif->k));
368 }
369
370 main_imsg_compose_eigrpe(IMSG_CTL_END, pid, NULL, 0);
371 }
372
373 static void
kr_redist_remove(struct kroute * kr)374 kr_redist_remove(struct kroute *kr)
375 {
376 /* was the route redistributed? */
377 if (!(kr->flags & F_REDISTRIBUTED))
378 return;
379
380 /* remove redistributed flag */
381 kr->flags &= ~F_REDISTRIBUTED;
382 main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, kr, sizeof(*kr));
383 }
384
/*
 * Decide whether a kernel route should be redistributed into EIGRP.
 * Returns 1 and notifies the RDE if so; otherwise withdraws any
 * previous redistribution of the route and returns 0.
 */
static int
kr_redist_eval(struct kroute *kr)
{
	/* Only non-eigrpd routes are considered for redistribution. */
	if (!(kr->flags & F_KERNEL))
		goto dont_redistribute;

	/* Dynamic routes are not redistributable. */
	if (kr->flags & F_DYNAMIC)
		goto dont_redistribute;

	/* filter-out non-redistributable addresses */
	if (bad_addr(kr->af, &kr->prefix) ||
	    (kr->af == AF_INET6 && IN6_IS_SCOPE_EMBED(&kr->prefix.v6)))
		goto dont_redistribute;

	/* interface is not up and running so don't announce */
	if (kr->flags & F_DOWN)
		goto dont_redistribute;

	/*
	 * Consider networks with nexthop loopback as not redistributable
	 * unless it is a reject or blackhole route.
	 */
	switch (kr->af) {
	case AF_INET:
		if (kr->nexthop.v4.s_addr == htonl(INADDR_LOOPBACK) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	case AF_INET6:
		if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop.v6) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	default:
		log_debug("%s: unexpected address-family", __func__);
		break;
	}

	/* prefix should be redistributed */
	kr->flags |= F_REDISTRIBUTED;
	main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, kr, sizeof(*kr));
	return (1);

dont_redistribute:
	kr_redist_remove(kr);
	return (0);
}
434
/*
 * Recompute redistribution for one prefix: only the first (lowest
 * numbered, i.e. best) priority list may contribute, and from it only
 * one nexthop; routes on all other priority lists are withdrawn.
 */
static void
kr_redistribute(struct kroute_prefix *kp)
{
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	/* only the highest prio route can be redistributed */
	TAILQ_FOREACH_REVERSE(kprio, &kp->priorities, plist, entry) {
		if (kprio == TAILQ_FIRST(&kp->priorities)) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				/* pick just one entry in case of multipath */
				if (kr_redist_eval(&kn->r))
					break;
		} else {
			/* lower preference: make sure it is withdrawn */
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				kr_redist_remove(&kn->r);
		}
	}
}
454
455 static __inline int
kroute_compare(struct kroute_prefix * a,struct kroute_prefix * b)456 kroute_compare(struct kroute_prefix *a, struct kroute_prefix *b)
457 {
458 int addrcmp;
459
460 if (a->af < b->af)
461 return (-1);
462 if (a->af > b->af)
463 return (1);
464
465 addrcmp = eigrp_addrcmp(a->af, &a->prefix, &b->prefix);
466 if (addrcmp != 0)
467 return (addrcmp);
468
469 if (a->prefixlen < b->prefixlen)
470 return (-1);
471 if (a->prefixlen > b->prefixlen)
472 return (1);
473
474 return (0);
475 }
476
477 /* tree management */
478 static struct kroute_prefix *
kroute_find_prefix(int af,union eigrpd_addr * prefix,uint8_t prefixlen)479 kroute_find_prefix(int af, union eigrpd_addr *prefix, uint8_t prefixlen)
480 {
481 struct kroute_prefix s;
482
483 s.af = af;
484 s.prefix = *prefix;
485 s.prefixlen = prefixlen;
486
487 return (RB_FIND(kroute_tree, &krt, &s));
488 }
489
490 static struct kroute_priority *
kroute_find_prio(struct kroute_prefix * kp,uint8_t prio)491 kroute_find_prio(struct kroute_prefix *kp, uint8_t prio)
492 {
493 struct kroute_priority *kprio;
494
495 /* RTP_ANY here picks the lowest priority node */
496 if (prio == RTP_ANY)
497 return (TAILQ_FIRST(&kp->priorities));
498
499 TAILQ_FOREACH(kprio, &kp->priorities, entry)
500 if (kprio->priority == prio)
501 return (kprio);
502
503 return (NULL);
504 }
505
506 static struct kroute_node *
kroute_find_gw(struct kroute_priority * kprio,union eigrpd_addr * nh)507 kroute_find_gw(struct kroute_priority *kprio, union eigrpd_addr *nh)
508 {
509 struct kroute_node *kn;
510
511 TAILQ_FOREACH(kn, &kprio->nexthops, entry)
512 if (eigrp_addrcmp(kprio->kp->af, &kn->r.nexthop, nh) == 0)
513 return (kn);
514
515 return (NULL);
516 }
517
518 static struct kroute_node *
kroute_insert(struct kroute * kr)519 kroute_insert(struct kroute *kr)
520 {
521 struct kroute_prefix *kp;
522 struct kroute_priority *kprio, *tmp;
523 struct kroute_node *kn;
524
525 kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
526 if (kp == NULL) {
527 kp = calloc(1, sizeof((*kp)));
528 if (kp == NULL)
529 fatal("kroute_insert");
530 kp->af = kr->af;
531 kp->prefix = kr->prefix;
532 kp->prefixlen = kr->prefixlen;
533 TAILQ_INIT(&kp->priorities);
534 RB_INSERT(kroute_tree, &krt, kp);
535 }
536
537 kprio = kroute_find_prio(kp, kr->priority);
538 if (kprio == NULL) {
539 kprio = calloc(1, sizeof(*kprio));
540 if (kprio == NULL)
541 fatal("kroute_insert");
542 kprio->kp = kp;
543 kprio->priority = kr->priority;
544 TAILQ_INIT(&kprio->nexthops);
545
546 /* lower priorities first */
547 TAILQ_FOREACH(tmp, &kp->priorities, entry)
548 if (tmp->priority > kprio->priority)
549 break;
550 if (tmp)
551 TAILQ_INSERT_BEFORE(tmp, kprio, entry);
552 else
553 TAILQ_INSERT_TAIL(&kp->priorities, kprio, entry);
554 }
555
556 kn = kroute_find_gw(kprio, &kr->nexthop);
557 if (kn == NULL) {
558 kn = calloc(1, sizeof(*kn));
559 if (kn == NULL)
560 fatal("kroute_insert");
561 kn->kprio = kprio;
562 kn->r = *kr;
563 TAILQ_INSERT_TAIL(&kprio->nexthops, kn, entry);
564 }
565
566 if (!(kr->flags & F_KERNEL)) {
567 /* don't validate or redistribute eigrp route */
568 kr->flags &= ~F_DOWN;
569 return (kn);
570 }
571
572 if (kif_validate(kr->ifindex))
573 kr->flags &= ~F_DOWN;
574 else
575 kr->flags |= F_DOWN;
576
577 kr_redistribute(kp);
578 return (kn);
579 }
580
/*
 * Remove one nexthop from the route tree, pruning now-empty priority
 * and prefix levels and updating redistribution.  Returns 0 on
 * success, -1 if the route was not found or the tree is inconsistent.
 */
static int
kroute_remove(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		goto notfound;
	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL)
		goto notfound;
	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL)
		goto notfound;

	/* withdraw from redistribution before freeing */
	kr_redist_remove(&kn->r);

	TAILQ_REMOVE(&kprio->nexthops, kn, entry);
	free(kn);

	/* prune the priority level if it became empty */
	if (TAILQ_EMPTY(&kprio->nexthops)) {
		TAILQ_REMOVE(&kp->priorities, kprio, entry);
		free(kprio);
	}

	/* prune the prefix, or reevaluate what is left of it */
	if (TAILQ_EMPTY(&kp->priorities)) {
		if (RB_REMOVE(kroute_tree, &krt, kp) == NULL) {
			log_warnx("%s failed for %s/%u", __func__,
			    log_addr(kr->af, &kr->prefix), kp->prefixlen);
			return (-1);
		}
		free(kp);
	} else
		kr_redistribute(kp);

	return (0);

notfound:
	log_warnx("%s failed to find %s/%u", __func__,
	    log_addr(kr->af, &kr->prefix), kr->prefixlen);
	return (-1);
}
625
626 static void
kroute_clear(void)627 kroute_clear(void)
628 {
629 struct kroute_prefix *kp;
630 struct kroute_priority *kprio;
631 struct kroute_node *kn;
632
633 while ((kp = RB_MIN(kroute_tree, &krt)) != NULL) {
634 while ((kprio = TAILQ_FIRST(&kp->priorities)) != NULL) {
635 while ((kn = TAILQ_FIRST(&kprio->nexthops)) != NULL) {
636 TAILQ_REMOVE(&kprio->nexthops, kn, entry);
637 free(kn);
638 }
639 TAILQ_REMOVE(&kp->priorities, kprio, entry);
640 free(kprio);
641 }
642 RB_REMOVE(kroute_tree, &krt, kp);
643 free(kp);
644 }
645 }
646
647 static __inline int
kif_compare(struct kif_node * a,struct kif_node * b)648 kif_compare(struct kif_node *a, struct kif_node *b)
649 {
650 return (b->k.ifindex - a->k.ifindex);
651 }
652
653 /* tree management */
654 static struct kif_node *
kif_find(unsigned short ifindex)655 kif_find(unsigned short ifindex)
656 {
657 struct kif_node s;
658
659 memset(&s, 0, sizeof(s));
660 s.k.ifindex = ifindex;
661
662 return (RB_FIND(kif_tree, &kit, &s));
663 }
664
665 struct kif *
kif_findname(char * ifname)666 kif_findname(char *ifname)
667 {
668 struct kif_node *kif;
669
670 RB_FOREACH(kif, kif_tree, &kit)
671 if (!strcmp(ifname, kif->k.ifname))
672 return (&kif->k);
673
674 return (NULL);
675 }
676
677 static struct kif_node *
kif_insert(unsigned short ifindex)678 kif_insert(unsigned short ifindex)
679 {
680 struct kif_node *kif;
681
682 if ((kif = calloc(1, sizeof(struct kif_node))) == NULL)
683 return (NULL);
684
685 kif->k.ifindex = ifindex;
686 TAILQ_INIT(&kif->addrs);
687
688 if (RB_INSERT(kif_tree, &kit, kif) != NULL)
689 fatalx("kif_insert: RB_INSERT");
690
691 return (kif);
692 }
693
694 static int
kif_remove(struct kif_node * kif)695 kif_remove(struct kif_node *kif)
696 {
697 struct kif_addr *ka;
698
699 if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
700 log_warnx("%s failed for interface %s", __func__, kif->k.ifname);
701 return (-1);
702 }
703
704 while ((ka = TAILQ_FIRST(&kif->addrs)) != NULL) {
705 TAILQ_REMOVE(&kif->addrs, ka, entry);
706 free(ka);
707 }
708 free(kif);
709 return (0);
710 }
711
712 void
kif_clear(void)713 kif_clear(void)
714 {
715 struct kif_node *kif;
716
717 while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
718 kif_remove(kif);
719 }
720
/*
 * Create or refresh the cached state of an interface from a routing
 * message.  Returns a pointer to the embedded struct kif, or NULL if
 * a new node could not be allocated.
 */
static struct kif *
kif_update(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kif_node	*kif;

	if ((kif = kif_find(ifindex)) == NULL) {
		if ((kif = kif_insert(ifindex)) == NULL)
			return (NULL);
		/* initial reachability: interface up and link up */
		kif->k.nh_reachable = (flags & IFF_UP) &&
		    LINK_STATE_IS_UP(ifd->ifi_link_state);
	}

	kif->k.flags = flags;
	kif->k.link_state = ifd->ifi_link_state;
	kif->k.if_type = ifd->ifi_type;
	kif->k.baudrate = ifd->ifi_baudrate;
	kif->k.mtu = ifd->ifi_mtu;
	kif->k.rdomain = ifd->ifi_rdomain;

	if (sdl && sdl->sdl_family == AF_LINK) {
		/* copy the name, truncating when it would overflow */
		if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sizeof(kif->k.ifname) - 1);
		else if (sdl->sdl_nlen > 0)
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sdl->sdl_nlen);
		/* string already terminated via calloc() */
	}

	return (&kif->k);
}
753
754 static int
kif_validate(unsigned short ifindex)755 kif_validate(unsigned short ifindex)
756 {
757 struct kif_node *kif;
758
759 if ((kif = kif_find(ifindex)) == NULL)
760 return (0);
761
762 return (kif->k.nh_reachable);
763 }
764
765 /* misc */
766 static void
protect_lo(void)767 protect_lo(void)
768 {
769 struct kroute kr4, kr6;
770
771 /* special protection for 127/8 */
772 memset(&kr4, 0, sizeof(kr4));
773 kr4.af = AF_INET;
774 kr4.prefix.v4.s_addr = htonl(INADDR_LOOPBACK & IN_CLASSA_NET);
775 kr4.prefixlen = 8;
776 kr4.flags = F_KERNEL|F_CONNECTED;
777 kroute_insert(&kr4);
778
779 /* special protection for ::1 */
780 memset(&kr6, 0, sizeof(kr6));
781 kr6.af = AF_INET6;
782 kr6.prefix.v6 = in6addr_loopback;
783 kr6.prefixlen = 128;
784 kr6.flags = F_KERNEL|F_CONNECTED;
785 kroute_insert(&kr6);
786 }
787
788 /* misc */
/*
 * Classful fallback: map a host-order IPv4 address to the natural
 * prefix length of its address class.
 */
static uint8_t
prefixlen_classful(in_addr_t ina)
{
	/* it hurt to write this. */

	if (ina >= 0xf0000000U)		/* class E */
		return (32);
	if (ina >= 0xe0000000U)		/* class D */
		return (4);
	if (ina >= 0xc0000000U)		/* class C */
		return (24);
	if (ina >= 0x80000000U)		/* class B */
		return (16);
	return (8);			/* class A */
}
805
/* round a sockaddr length up to the next long-word boundary */
#define ROUNDUP(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

/*
 * Split the packed sockaddr list that follows a routing message into
 * rti_info[], indexed by RTAX_*; absent addresses are set to NULL.
 */
static void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			/* each sockaddr is padded to a long boundary */
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}
823
/*
 * Process an interface state change: refresh the cached kif, and if
 * nexthop reachability flipped, notify eigrpe/rde and reevaluate the
 * F_DOWN flag and redistribution of every route using the interface.
 */
static void
if_change(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kif		*kif;
	uint8_t			 reachable;

	if ((kif = kif_update(ifindex, flags, ifd, sdl)) == NULL) {
		log_warn("%s: kif_update(%u)", __func__, ifindex);
		return;
	}

	reachable = (kif->flags & IFF_UP) &&
	    LINK_STATE_IS_UP(kif->link_state);

	if (reachable == kif->nh_reachable)
		return;		/* nothing changed wrt nexthop validity */

	kif->nh_reachable = reachable;

	/* notify eigrpe about link state */
	main_imsg_compose_eigrpe(IMSG_IFINFO, 0, kif, sizeof(struct kif));

	/* notify rde about link going down */
	if (!kif->nh_reachable)
		main_imsg_compose_rde(IMSG_IFDOWN, 0, kif, sizeof(struct kif));

	/* update redistribute list */
	RB_FOREACH(kp, kroute_tree, &krt) {
		TAILQ_FOREACH(kprio, &kp->priorities, entry) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (kn->r.ifindex != ifindex)
					continue;

				if (reachable)
					kn->r.flags &= ~F_DOWN;
				else
					kn->r.flags |= F_DOWN;
			}
		}
		/* reevaluate every prefix; unaffected ones are a no-op */
		kr_redistribute(kp);
	}
}
870
/*
 * Process a new interface address from the routing socket: record it
 * on the owning interface and announce it to the eigrpe process.
 * Unwanted or non-IP addresses are silently ignored.
 */
static void
if_newaddr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kif_addr		*ka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v4 = ifa4->sin_addr;
		/* mask and broadcast may be absent in the message */
		if (mask4)
			ka->a.prefixlen =
			    mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			ka->a.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			ka->a.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			ka->a.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	ka->a.ifindex = ifindex;
	ka->a.af = ifa->sa_family;
	TAILQ_INSERT_TAIL(&kif->addrs, ka, entry);

	/* notify eigrpe about new address */
	main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a, sizeof(ka->a));
}
937
/*
 * Process a deleted interface address: find the matching cached entry
 * on the interface, announce the removal to eigrpe and free it.
 */
static void
if_deladdr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kaddr		 k;
	struct kif_addr		*ka, *nka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	/* build a key to compare against the cached addresses */
	memset(&k, 0, sizeof(k));
	k.af = ifa->sa_family;
	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		k.addr.v4 = ifa4->sin_addr;
		if (mask4)
			k.prefixlen = mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			k.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		k.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			k.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			k.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	/* safe traversal: the matching entry is removed inside the loop */
	for (ka = TAILQ_FIRST(&kif->addrs); ka != NULL; ka = nka) {
		nka = TAILQ_NEXT(ka, entry);

		if (ka->a.af != k.af ||
		    ka->a.prefixlen != k.prefixlen ||
		    eigrp_addrcmp(ka->a.af, &ka->a.addr, &k.addr) ||
		    eigrp_addrcmp(ka->a.af, &ka->a.dstbrd, &k.dstbrd))
			continue;

		/* notify eigrpe about removed address */
		main_imsg_compose_eigrpe(IMSG_DELADDR, 0, &ka->a,
		    sizeof(ka->a));
		TAILQ_REMOVE(&kif->addrs, ka, entry);
		free(ka);
		return;
	}
}
1012
1013 static void
if_announce(void * msg)1014 if_announce(void *msg)
1015 {
1016 struct if_announcemsghdr *ifan;
1017 struct kif_node *kif;
1018
1019 ifan = msg;
1020
1021 switch (ifan->ifan_what) {
1022 case IFAN_ARRIVAL:
1023 kif = kif_insert(ifan->ifan_index);
1024 if (kif)
1025 strlcpy(kif->k.ifname, ifan->ifan_name,
1026 sizeof(kif->k.ifname));
1027 break;
1028 case IFAN_DEPARTURE:
1029 kif = kif_find(ifan->ifan_index);
1030 if (kif)
1031 kif_remove(kif);
1032 break;
1033 }
1034 }
1035
/* rtsock */
/*
 * Build and write an IPv4 routing message (RTM_ADD/CHANGE/DELETE) for
 * kr to the routing socket.  A vanished route on delete and ESRCH on
 * change (retried as add) are tolerated.  No-op while decoupled.
 * Always returns 0; write errors are only logged.
 */
static int
send_rtmsg_v4(int fd, int action, struct kroute *kr)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct sockaddr_in	prefix;
	struct sockaddr_in	nexthop;
	struct sockaddr_in	mask;
	int			iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	if (kr->flags & F_BLACKHOLE)
		hdr.rtm_flags |= RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	/* destination prefix */
	memset(&prefix, 0, sizeof(prefix));
	prefix.sin_len = sizeof(prefix);
	prefix.sin_family = AF_INET;
	prefix.sin_addr = kr->prefix.v4;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += sizeof(prefix);
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = sizeof(prefix);

	/* gateway, only when a nexthop is set */
	if (kr->nexthop.v4.s_addr != 0) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.sin_len = sizeof(nexthop);
		nexthop.sin_family = AF_INET;
		nexthop.sin_addr = kr->nexthop.v4;
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += sizeof(nexthop);
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = sizeof(nexthop);
	}

	/* netmask */
	memset(&mask, 0, sizeof(mask));
	mask.sin_len = sizeof(mask);
	mask.sin_family = AF_INET;
	mask.sin_addr.s_addr = prefixlen2mask(kr->prefixlen);
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += sizeof(mask);
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = sizeof(mask);

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			/* change of a missing route becomes an add */
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    inet_ntoa(kr->prefix.v4),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    inet_ntoa(kr->prefix.v4), kr->prefixlen);
		return (0);
	}

	return (0);
}
1124
/*
 * Build and write an IPv6 routing message (RTM_ADD/CHANGE/DELETE) for
 * kr to the routing socket.  IPv6 sockaddrs are padded to a long
 * boundary, hence the wrapper struct.  Behaves like send_rtmsg_v4:
 * no-op while decoupled, always returns 0, errors are only logged.
 */
static int
send_rtmsg_v6(int fd, int action, struct kroute *kr)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct pad {
		struct sockaddr_in6	addr;
		char			pad[sizeof(long)]; /* thank you IPv6 */
	} prefix, nexthop, mask;
	int			iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	/* destination prefix */
	memset(&prefix, 0, sizeof(prefix));
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
	prefix.addr.sin6_family = AF_INET6;
	prefix.addr.sin6_addr = kr->prefix.v6;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	/* gateway, only when a nexthop is set */
	if (!IN6_IS_ADDR_UNSPECIFIED(&kr->nexthop.v6)) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
		nexthop.addr.sin6_family = AF_INET6;
		nexthop.addr.sin6_addr = kr->nexthop.v6;
		nexthop.addr.sin6_scope_id = kr->ifindex;
		/* link-local nexthops need the scope embedded */
		embedscope(&nexthop.addr);

		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
	}

	/* netmask */
	memset(&mask, 0, sizeof(mask));
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
	mask.addr.sin6_family = AF_INET6;
	mask.addr.sin6_addr = *prefixlen2mask6(kr->prefixlen);
	/* adjust header */
	if (kr->prefixlen == 128)
		hdr.rtm_flags |= RTF_HOST;
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			/* change of a missing route becomes an add */
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    log_in6addr(&kr->prefix.v6),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    log_in6addr(&kr->prefix.v6), kr->prefixlen);
		return (0);
	}

	return (0);
}
1216
1217 static int
send_rtmsg(int fd,int action,struct kroute * kr)1218 send_rtmsg(int fd, int action, struct kroute *kr)
1219 {
1220 switch (kr->af) {
1221 case AF_INET:
1222 return (send_rtmsg_v4(fd, action, kr));
1223 case AF_INET6:
1224 return (send_rtmsg_v6(fd, action, kr));
1225 default:
1226 break;
1227 }
1228
1229 return (-1);
1230 }
1231
/*
 * Load the full kernel routing table of our rdomain via sysctl(2)
 * (NET_RT_DUMP) and feed the dump to rtmsg_process().
 * Returns rtmsg_process()'s result, or -1 on sysctl/malloc failure.
 */
static int
fetchtable(void)
{
	size_t		 len;
	int		 mib[7];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;		/* wildcard address family */
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kr_state.rdomain;	/* rtableid */

	for (;;) {
		if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
			log_warn("sysctl");
			return (-1);
		}
		if ((buf = malloc(len)) == NULL) {
			log_warn("%s", __func__);
			return (-1);
		}
		if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
			free(buf);
			/*
			 * The routing table may have grown between the
			 * sizing call and the dump; re-query the size
			 * and try again.
			 */
			if (errno == ENOMEM)
				continue;
			log_warn("sysctl");
			return (-1);
		}
		break;
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}
1267
/*
 * Load the kernel interface list via sysctl(2) (NET_RT_IFLIST) and feed
 * it to rtmsg_process().  Returns rtmsg_process()'s result, or -1 on
 * sysctl/malloc failure.
 */
static int
fetchifs(void)
{
	size_t		 len;
	int		 mib[6];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;		/* wildcard */
	mib[4] = NET_RT_IFLIST;
	mib[5] = 0;

	for (;;) {
		if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
			log_warn("sysctl");
			return (-1);
		}
		if ((buf = malloc(len)) == NULL) {
			log_warn("%s", __func__);
			return (-1);
		}
		if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
			free(buf);
			/*
			 * Interfaces/addresses may have been added between
			 * the sizing call and the dump; re-query the size
			 * and try again.
			 */
			if (errno == ENOMEM)
				continue;
			log_warn("sysctl");
			return (-1);
		}
		break;
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}
1302
1303 static int
dispatch_rtmsg(void)1304 dispatch_rtmsg(void)
1305 {
1306 char buf[RT_BUF_SIZE];
1307 ssize_t n;
1308
1309 if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
1310 if (errno == EAGAIN || errno == EINTR)
1311 return (0);
1312 log_warn("%s: read error", __func__);
1313 return (-1);
1314 }
1315
1316 if (n == 0) {
1317 log_warnx("routing socket closed");
1318 return (-1);
1319 }
1320
1321 return (rtmsg_process(buf, n));
1322 }
1323
/*
 * Walk a buffer of routing-socket messages (from a sysctl dump or a
 * socket read) and dispatch each message: route messages go to
 * rtmsg_process_route(), interface/address messages update the local
 * interface state.  Returns the number of bytes processed, or -1 if
 * rtmsg_process_route() fails.
 */
static int
rtmsg_process(char *buf, size_t len)
{
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	size_t			 offset;
	char			*next;

	for (offset = 0; offset < len; offset += rtm->rtm_msglen) {
		next = buf + offset;
		rtm = (struct rt_msghdr *)next;
		/*
		 * rtm_msglen is the first (unsigned short) field; make sure
		 * both it and the full message fit in the buffer before
		 * trusting the header.
		 */
		if (len < offset + sizeof(unsigned short) ||
		    len < offset + rtm->rtm_msglen)
			fatalx("rtmsg_process: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		/* addresses start right after the (type-specific) header */
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_GET:
		case RTM_CHANGE:
		case RTM_DELETE:
			if (rtm->rtm_errno)		/* failed attempts... */
				continue;

			/* only care about our routing domain */
			if (rtm->rtm_tableid != kr_state.rdomain)
				continue;

			/* only process RTM_GET replies to our own queries */
			if (rtm->rtm_type == RTM_GET &&
			    rtm->rtm_pid != kr_state.pid)
				continue;

			/* Skip ARP/ND cache and broadcast routes. */
			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
				continue;

			if (rtmsg_process_route(rtm, rti_info) == -1)
				return (-1);
		}

		/* interface and address messages */
		switch (rtm->rtm_type) {
		case RTM_IFINFO:
			/* copy to get a properly aligned if_msghdr */
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
			    (struct sockaddr_dl *)rti_info[RTAX_IFP]);
			break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_DELADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_deladdr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}

	return (offset);
}
1408
/*
 * Translate a single kernel route message into a struct kroute and
 * update the local copy of the kernel routing table accordingly:
 *  - RTM_DELETE removes the matching kernel route from our tree;
 *  - RTM_CHANGE first drops the (single) old nexthop, then the route is
 *    re-inserted with the new nexthop;
 *  - RTM_ADD/RTM_GET either update an existing entry or insert a new
 *    one; stale routes carrying one of our own EIGRP priorities
 *    ("alien" leftovers from a previous daemon run) are deleted from
 *    the kernel instead of being adopted.
 * Returns 0 on success (including ignored messages), -1 on error.
 */
static int
rtmsg_process_route(struct rt_msghdr *rtm, struct sockaddr *rti_info[RTAX_MAX])
{
	struct sockaddr		*sa;
	struct sockaddr_in	*sa_in;
	struct sockaddr_in6	*sa_in6;
	struct kroute		 kr;
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	/* a route message without a destination is useless */
	if ((sa = rti_info[RTAX_DST]) == NULL)
		return (-1);

	memset(&kr, 0, sizeof(kr));
	kr.af = sa->sa_family;
	switch (kr.af) {
	case AF_INET:
		kr.prefix.v4 = ((struct sockaddr_in *)sa)->sin_addr;
		sa_in = (struct sockaddr_in *) rti_info[RTAX_NETMASK];
		/* derive the prefix length: netmask, host flag, default
		 * route, or classful fallback (in that order) */
		if (sa_in != NULL && sa_in->sin_len != 0)
			kr.prefixlen = mask2prefixlen(sa_in->sin_addr.s_addr);
		else if (rtm->rtm_flags & RTF_HOST)
			kr.prefixlen = 32;
		else if (kr.prefix.v4.s_addr == INADDR_ANY)
			kr.prefixlen = 0;
		else
			kr.prefixlen = prefixlen_classful(kr.prefix.v4.s_addr);
		break;
	case AF_INET6:
		kr.prefix.v6 = ((struct sockaddr_in6 *)sa)->sin6_addr;
		sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
		if (sa_in6 != NULL && sa_in6->sin6_len != 0)
			kr.prefixlen = mask2prefixlen6(sa_in6);
		else if (rtm->rtm_flags & RTF_HOST)
			kr.prefixlen = 128;
		else if (IN6_IS_ADDR_UNSPECIFIED(&kr.prefix.v6))
			kr.prefixlen = 0;
		else
			/* no classful fallback exists for IPv6 */
			fatalx("in6 net addr without netmask");
		break;
	default:
		/* other address families are not our business */
		return (0);
	}
	kr.ifindex = rtm->rtm_index;
	if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
		switch (sa->sa_family) {
		case AF_INET:
			kr.nexthop.v4 = ((struct sockaddr_in *)sa)->sin_addr;
			break;
		case AF_INET6:
			sa_in6 = (struct sockaddr_in6 *)sa;
			/* extract the KAME-embedded scope id, if any */
			recoverscope(sa_in6);
			kr.nexthop.v6 = sa_in6->sin6_addr;
			if (sa_in6->sin6_scope_id)
				kr.ifindex = sa_in6->sin6_scope_id;
			break;
		case AF_LINK:
			/* link-level gateway means a directly connected net */
			kr.flags |= F_CONNECTED;
			break;
		}
	}
	/* translate kernel route flags into our own flag bits */
	kr.flags |= F_KERNEL;
	if (rtm->rtm_flags & RTF_STATIC)
		kr.flags |= F_STATIC;
	if (rtm->rtm_flags & RTF_BLACKHOLE)
		kr.flags |= F_BLACKHOLE;
	if (rtm->rtm_flags & RTF_REJECT)
		kr.flags |= F_REJECT;
	if (rtm->rtm_flags & RTF_DYNAMIC)
		kr.flags |= F_DYNAMIC;
	if (rtm->rtm_flags & RTF_CONNECTED)
		kr.flags |= F_CONNECTED;
	kr.priority = rtm->rtm_priority;

	if (rtm->rtm_type == RTM_CHANGE) {
		/*
		 * The kernel doesn't allow RTM_CHANGE for multipath routes.
		 * If we got this message we know that the route has only one
		 * nexthop and we should remove it before installing the same
		 * route with the new nexthop.
		 */
		kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen);
		if (kp) {
			kprio = kroute_find_prio(kp, kr.priority);
			if (kprio) {
				kn = TAILQ_FIRST(&kprio->nexthops);
				if (kn)
					kroute_remove(&kn->r);
			}
		}
	}

	/* look up the exact (prefix, priority, nexthop) entry, if any */
	kn = NULL;
	kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen);
	if (kp) {
		kprio = kroute_find_prio(kp, kr.priority);
		if (kprio)
			kn = kroute_find_gw(kprio, &kr.nexthop);
	}

	if (rtm->rtm_type == RTM_DELETE) {
		/* only remove routes we learned from the kernel */
		if (kn == NULL || !(kn->r.flags & F_KERNEL))
			return (0);
		return (kroute_remove(&kr));
	}

	/* non-connected routes must carry a usable nexthop */
	if (!eigrp_addrisset(kr.af, &kr.nexthop) && !(kr.flags & F_CONNECTED)) {
		log_warnx("%s: no nexthop for %s/%u", __func__,
		    log_addr(kr.af, &kr.prefix), kr.prefixlen);
		return (-1);
	}

	if (kn != NULL) {
		/* update route */
		kn->r = kr;

		/* refresh the up/down state from the interface table */
		if (kif_validate(kn->r.ifindex))
			kn->r.flags &= ~F_DOWN;
		else
			kn->r.flags |= F_DOWN;

		kr_redistribute(kp);
	} else {
		/*
		 * A kernel route with one of our EIGRP priorities that we
		 * don't know about is a leftover from a previous run:
		 * delete it from the kernel instead of adopting it.
		 */
		if ((rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_GET) &&
		    (kr.priority == eigrpd_conf->fib_priority_internal ||
		    kr.priority == eigrpd_conf->fib_priority_external ||
		    kr.priority == eigrpd_conf->fib_priority_summary)) {
			log_warnx("alien EIGRP route %s/%d", log_addr(kr.af,
			    &kr.prefix), kr.prefixlen);
			return (send_rtmsg(kr_state.fd, RTM_DELETE, &kr));
		}

		kroute_insert(&kr);
	}

	return (0);
}
1547