xref: /openbsd/usr.sbin/eigrpd/rde.c (revision 097a140d)
1 /*	$OpenBSD: rde.c,v 1.24 2021/01/19 10:53:25 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
5  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
6  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
7  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/types.h>
23 #include <sys/socket.h>
24 #include <net/route.h>
25 
26 #include <errno.h>
27 #include <pwd.h>
28 #include <signal.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <unistd.h>
32 
33 #include "eigrpd.h"
34 #include "eigrpe.h"
35 #include "rde.h"
36 #include "log.h"
37 
38 static void		 rde_sig_handler(int sig, short, void *);
39 static __dead void	 rde_shutdown(void);
40 static void		 rde_dispatch_imsg(int, short, void *);
41 static void		 rde_dispatch_parent(int, short, void *);
42 static struct redistribute *eigrp_redistribute(struct eigrp *, struct kroute *);
43 static void		 rt_redist_set(struct kroute *, int);
44 static void		 rt_snap(struct rde_nbr *);
45 static struct ctl_rt	*rt_to_ctl(struct rt_node *, struct eigrp_route *);
46 static void		 rt_dump(struct ctl_show_topology_req *, pid_t);
47 
48 struct eigrpd_conf	*rdeconf;
49 
50 static struct imsgev	*iev_eigrpe;
51 static struct imsgev	*iev_main;
52 
/* ARGSUSED */
static void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * The usual async-signal-safety rules don't apply here:
	 * libevent delivers the signal to us in normal context.
	 */

	if (sig == SIGINT || sig == SIGTERM)
		rde_shutdown();
		/* NOTREACHED */

	fatalx("unexpected signal");
}
70 
/*
 * route decision engine: entry point of the rde process.  Chroots to the
 * unprivileged user's home, drops privileges, pledges, and then runs the
 * libevent loop, talking to the parent over the inherited pipe on fd 3.
 */
void
rde(int debug, int verbose)
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;

	rdeconf = config_new_empty();

	log_init(debug);
	log_verbose(verbose);

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	/* jail the process inside the unprivileged user's directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	log_procname = "rde";

	/* drop privileges: supplementary groups, then gid, then uid */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	/* only stdio and receiving fds over the imsg pipe are needed */
	if (pledge("stdio recvfd", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipe and event handler to the parent process (fd 3) */
	if ((iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_main->ibuf, 3);
	iev_main->handler = rde_dispatch_parent;
	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	gettimeofday(&now, NULL);
	global.uptime = now.tv_sec;

	/* run until a signal or a dead pipe triggers event_loopexit() */
	event_dispatch();

	rde_shutdown();
}
130 
131 static __dead void
132 rde_shutdown(void)
133 {
134 	/* close pipes */
135 	msgbuf_clear(&iev_eigrpe->ibuf.w);
136 	close(iev_eigrpe->ibuf.fd);
137 	msgbuf_clear(&iev_main->ibuf.w);
138 	close(iev_main->ibuf.fd);
139 
140 	config_clear(rdeconf, PROC_RDE_ENGINE);
141 
142 	free(iev_eigrpe);
143 	free(iev_main);
144 
145 	log_info("route decision engine exiting");
146 	exit(0);
147 }
148 
149 int
150 rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
151 {
152 	return (imsg_compose_event(iev_main, type, 0, pid, -1,
153 	    data, datalen));
154 }
155 
156 int
157 rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
158     uint16_t datalen)
159 {
160 	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
161 	    data, datalen));
162 }
163 
/* ARGSUSED */
/*
 * Handle imsgs arriving from the eigrpe process: neighbor lifecycle
 * events, received routing information (updates/queries/replies) and
 * control requests relayed from eigrpctl.
 */
static void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* drain every complete message that has been buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			/* the peerid must not already be in use */
			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			/* act as if all routes via this nbr were lost */
			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			/* new adjacency: send a full topology snapshot */
			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			/* feed the route information into DUAL */
			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				/* last argument: stuck-in-active variant */
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			/* dump matching routes, then signal end of dump */
			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
297 
/* ARGSUSED */
/*
 * Handle imsgs arriving from the parent (main) process: kernel interface
 * and route events, the IPC socket to eigrpe, and the piecewise transfer
 * of a new configuration during reload.  nconf/niface/neigrp are static
 * because a reload is spread over multiple IMSG_RECONF_* messages and the
 * partially built config must survive between calls.
 */
static void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct eigrpd_conf *nconf;
	static struct iface	*niface;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* drain every complete message that has been buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_SOCKET_IPC:
			/* receive the direct pipe to the eigrpe process */
			if (iev_eigrpe) {
				log_warnx("%s: received unexpected imsg fd "
				    "to eigrpe", __func__);
				break;
			}
			if ((fd = imsg.fd) == -1) {
				log_warnx("%s: expected to receive imsg fd to "
				    "eigrpe but didn't receive any", __func__);
				break;
			}

			iev_eigrpe = malloc(sizeof(struct imsgev));
			if (iev_eigrpe == NULL)
				fatal(NULL);
			imsg_init(&iev_eigrpe->ibuf, fd);
			iev_eigrpe->handler = rde_dispatch_imsg;
			iev_eigrpe->events = EV_READ;
			event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd,
			    iev_eigrpe->events, iev_eigrpe->handler,
			    iev_eigrpe);
			event_add(&iev_eigrpe->ev, NULL);
			break;
		case IMSG_RECONF_CONF:
			/* start of a config reload: new top-level config */
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			/* list heads in the copy are stale; reset them */
			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			/* the same iface may be sent once per instance */
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->ei_list);
			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			/* binds the last iface to the last instance */
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INIT(&nei->summary_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			/* reload complete: swap in the new configuration */
			merge_config(rdeconf, nconf, PROC_RDE_ENGINE);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
450 
451 void
452 rde_instance_init(struct eigrp *eigrp)
453 {
454 	struct rde_nbr		 nbr;
455 
456 	memset(&nbr, 0, sizeof(nbr));
457 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
458 	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
459 	eigrp->rnbr_redist->eigrp = eigrp;
460 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
461 	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
462 	eigrp->rnbr_summary->eigrp = eigrp;
463 }
464 
/*
 * Tear down an EIGRP instance: flush its topology table, delete all of
 * its neighbors (including the two self neighbors) and free the instance.
 */
void
rde_instance_del(struct eigrp *eigrp)
{
	struct rde_nbr		*nbr, *safe;
	struct rt_node		*rn;

	/* clear topology */
	while((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
		rt_del(rn);

	/* clear nbrs (rde_nbrs is global, so filter by instance) */
	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
		if (nbr->eigrp == eigrp)
			rde_nbr_del(nbr, 0);
	rde_nbr_del(eigrp->rnbr_redist, 0);
	rde_nbr_del(eigrp->rnbr_summary, 0);

	free(eigrp);
}
484 
485 void
486 rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
487 {
488 	struct eigrp	*eigrp = route->nbr->eigrp;
489 	struct kroute	 kr;
490 	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;
491 
492 	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
493 	    log_addr(eigrp->af, &route->nbr->addr));
494 
495 	memset(&kr, 0, sizeof(kr));
496 	kr.af = eigrp->af;
497 	kr.prefix = rn->prefix;
498 	kr.prefixlen = rn->prefixlen;
499 	if (route->nbr->ei) {
500 		kr.nexthop = route->nexthop;
501 		kr.ifindex = route->nbr->ei->iface->ifindex;
502 	} else {
503 		switch (eigrp->af) {
504 		case AF_INET:
505 			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
506 			break;
507 		case AF_INET6:
508 			kr.nexthop.v6 = lo6;
509 			break;
510 		default:
511 			fatalx("rde_send_delete_kroute: unknown af");
512 			break;
513 		}
514 		kr.flags = F_BLACKHOLE;
515 	}
516 	if (route->type == EIGRP_ROUTE_EXTERNAL)
517 		kr.priority = rdeconf->fib_priority_external;
518 	else {
519 		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
520 			kr.priority = rdeconf->fib_priority_summary;
521 		else
522 			kr.priority = rdeconf->fib_priority_internal;
523 	}
524 
525 	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));
526 
527 	route->flags |= F_EIGRP_ROUTE_INSTALLED;
528 }
529 
530 void
531 rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
532 {
533 	struct eigrp	*eigrp = route->nbr->eigrp;
534 	struct kroute	 kr;
535 	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;
536 
537 	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
538 	    log_addr(eigrp->af, &route->nbr->addr));
539 
540 	memset(&kr, 0, sizeof(kr));
541 	kr.af = eigrp->af;
542 	kr.prefix = rn->prefix;
543 	kr.prefixlen = rn->prefixlen;
544 	if (route->nbr->ei) {
545 		kr.nexthop = route->nexthop;
546 		kr.ifindex = route->nbr->ei->iface->ifindex;
547 	} else {
548 		switch (eigrp->af) {
549 		case AF_INET:
550 			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
551 			break;
552 		case AF_INET6:
553 			kr.nexthop.v6 = lo6;
554 			break;
555 		default:
556 			fatalx("rde_send_delete_kroute: unknown af");
557 			break;
558 		}
559 		kr.flags = F_BLACKHOLE;
560 	}
561 	if (route->type == EIGRP_ROUTE_EXTERNAL)
562 		kr.priority = rdeconf->fib_priority_external;
563 	else {
564 		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
565 			kr.priority = rdeconf->fib_priority_summary;
566 		else
567 			kr.priority = rdeconf->fib_priority_internal;
568 	}
569 
570 	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));
571 
572 	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
573 }
574 
575 static struct redistribute *
576 eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
577 {
578 	struct redistribute	*r;
579 	uint8_t			 is_default = 0;
580 	union eigrpd_addr	 addr;
581 
582 	/* only allow the default route via REDIST_DEFAULT */
583 	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
584 		is_default = 1;
585 
586 	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
587 		switch (r->type & ~REDIST_NO) {
588 		case REDIST_STATIC:
589 			if (is_default)
590 				continue;
591 			if (kr->flags & F_STATIC)
592 				return (r->type & REDIST_NO ? NULL : r);
593 			break;
594 		case REDIST_RIP:
595 			if (is_default)
596 				continue;
597 			if (kr->priority == RTP_RIP)
598 				return (r->type & REDIST_NO ? NULL : r);
599 			break;
600 		case REDIST_OSPF:
601 			if (is_default)
602 				continue;
603 			if (kr->priority == RTP_OSPF)
604 				return (r->type & REDIST_NO ? NULL : r);
605 			break;
606 		case REDIST_CONNECTED:
607 			if (is_default)
608 				continue;
609 			if (kr->flags & F_CONNECTED)
610 				return (r->type & REDIST_NO ? NULL : r);
611 			break;
612 		case REDIST_ADDR:
613 			if (eigrp_addrisset(r->af, &r->addr) &&
614 			    r->prefixlen == 0) {
615 				if (is_default)
616 					return (r->type & REDIST_NO ? NULL : r);
617 				else
618 					return (0);
619 			}
620 
621 			eigrp_applymask(kr->af, &addr, &kr->prefix,
622 			    r->prefixlen);
623 			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
624 			    kr->prefixlen >= r->prefixlen)
625 				return (r->type & REDIST_NO ? NULL : r);
626 			break;
627 		case REDIST_DEFAULT:
628 			if (is_default)
629 				return (r->type & REDIST_NO ? NULL : r);
630 			break;
631 		}
632 	}
633 
634 	return (NULL);
635 }
636 
/*
 * Inject (or, with withdraw set, retract) a kernel route as an external
 * EIGRP route into every instance whose redistribute configuration
 * matches it.  Withdrawal is signalled with an infinite delay metric.
 */
static void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		/* per-rule metric wins over the instance default metric */
		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		ri.prefix = kr->prefix;
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric */
		ri.emetric.routerid = htonl(rdeconf->rtr_id.s_addr);
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		/* derive the origin protocol from the FIB priority */
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		/* run the route through DUAL via the redist self neighbor */
		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}
700 
701 void
702 rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
703     struct classic_metric *metric)
704 {
705 	struct rinfo		 ri;
706 
707 	memset(&ri, 0, sizeof(ri));
708 	ri.af = eigrp->af;
709 	ri.type = EIGRP_ROUTE_INTERNAL;
710 	ri.prefix = summary->prefix;
711 	ri.prefixlen = summary->prefixlen;
712 	ri.metric = *metric;
713 
714 	rde_check_update(eigrp->rnbr_summary, &ri);
715 }
716 
717 /* send all known routing information to new neighbor */
718 static void
719 rt_snap(struct rde_nbr *nbr)
720 {
721 	struct eigrp		*eigrp = nbr->eigrp;
722 	struct rt_node		*rn;
723 	struct rinfo		 ri;
724 
725 	RB_FOREACH(rn, rt_tree, &eigrp->topology)
726 		if (rn->state == DUAL_STA_PASSIVE &&
727 		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
728 			rinfo_fill_successor(rn, &ri);
729 			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
730 			    nbr->peerid, 0, &ri, sizeof(ri));
731 		}
732 
733 	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
734 	    NULL, 0);
735 }
736 
/*
 * Convert a topology route into the wire format consumed by eigrpctl.
 * Returns a pointer to a static buffer that is overwritten on every
 * call — the caller must copy or send it before calling again.
 */
static struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	rtctl.prefix = rn->prefix;
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	rtctl.nexthop = route->nexthop;
	/* self neighbors have no interface; label them by origin instead */
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric: convert from wire encoding to human units */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	rtctl.emetric = route->emetric;

	/* flag the DUAL successor and any feasible successors */
	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}
779 
/*
 * Dump the topology of every instance to eigrpctl (identified by pid),
 * honouring the filters in the request: an optional prefix/prefixlen,
 * active-only routes, and whether to include all links or just the
 * feasible ones.
 */
static void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			/* filter on exact prefix match if one was given */
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			/* mark the first route of each destination */
			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				/* by default only show feasible routes */
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}
820