/*	$OpenBSD: rde.c,v 1.3 2015/10/05 01:59:33 renato Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>

#include "eigrp.h"
#include "eigrpd.h"
#include "eigrpe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);

struct eigrpd_conf	*rdeconf = NULL, *nconf;
struct imsgev		*iev_eigrpe;
struct imsgev		*iev_main;

extern struct iface_id_head ifaces_by_id;
RB_PROTOTYPE(iface_id_head, eigrp_iface, id_tree, iface_id_compare)

RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)

extern struct rde_nbr_head rde_nbrs;
RB_PROTOTYPE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct eigrpd_conf *xconf, int pipe_parent2rde[2], int pipe_eigrpe2rde[2],
    int pipe_parent2eigrpe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;
	struct eigrp		*eigrp;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	eigrpd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_eigrpe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2eigrpe[0]);
	close(pipe_parent2eigrpe[1]);

	if ((iev_eigrpe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_eigrpe->ibuf, pipe_eigrpe2rde[1]);
	iev_eigrpe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_eigrpe->events = EV_READ;
	event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd, iev_eigrpe->events,
	    iev_eigrpe->handler, iev_eigrpe);
	event_add(&iev_eigrpe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		rde_instance_init(eigrp);

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

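/* free the RDE's state and terminate the process */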
void
rde_shutdown(void)
{
	config_clear(rdeconf);

	msgbuf_clear(&iev_eigrpe->ibuf.w);
	free(iev_eigrpe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);

	log_info("route decision engine exiting");
	_exit(0);
}

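/* compose an imsg and queue it for delivery to the parent process */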
int
rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
{
	return (imsg_compose_event(iev_main, type, 0, pid, -1,
	    data, datalen));
}

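/* compose an imsg and queue it for delivery to the eigrpe process */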
int
rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
    uint16_t datalen)
{
	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
	    data, datalen));
}

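/*
 * Handle imsgs coming from the eigrpe process: neighbor state changes,
 * received DUAL messages (update/query/reply and their SIA variants)
 * and control requests relayed from eigrpctl.
 */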
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

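/*
 * Handle imsgs coming from the parent process: interface link state,
 * kernel routes to (un)redistribute and the staged reconfiguration
 * sequence (IMSG_RECONF_*), which is accumulated in nconf and merged
 * into the running config on IMSG_RECONF_END.
 */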
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct iface	*niface;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

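/*
 * Create the instance's two internal self neighbors, used as the
 * source of redistributed and summary routes respectively.
 */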
void
rde_instance_init(struct eigrp *eigrp)
{
	struct rde_nbr		nbr;

	memset(&nbr, 0, sizeof(nbr));
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_redist->eigrp = eigrp;
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_summary->eigrp = eigrp;
}

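/* free an instance: flush its topology and drop its neighbors */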
void
rde_instance_del(struct eigrp *eigrp)
{
	struct rde_nbr		*nbr, *safe;
	struct rt_node		*rn;

	/* clear topology */
	while ((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
		rt_del(rn);

	/* clear nbrs */
	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
		if (nbr->eigrp == eigrp)
			rde_nbr_del(nbr, 0);
	rde_nbr_del(eigrp->rnbr_redist, 0);
	rde_nbr_del(eigrp->rnbr_summary, 0);

	free(eigrp);
}

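/*
 * Ask the parent process to install or update the kernel route for
 * this destination.  If the EIGRP route has no explicit next hop, the
 * advertising neighbor's address is used instead.
 */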
void
rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	memcpy(&kr.prefix, &rn->prefix, sizeof(kr.prefix));
	kr.prefixlen = rn->prefixlen;
	if (eigrp_addrisset(eigrp->af, &route->nexthop))
		memcpy(&kr.nexthop, &route->nexthop, sizeof(kr.nexthop));
	else
		memcpy(&kr.nexthop, &route->nbr->addr, sizeof(kr.nexthop));
	kr.ifindex = route->nbr->ei->iface->ifindex;
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else
		kr.priority = rdeconf->fib_priority_internal;

	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));

	route->flags |= F_EIGRP_ROUTE_INSTALLED;
}

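/* ask the parent process to remove the kernel route for this destination */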
void
rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	memcpy(&kr.prefix, &rn->prefix, sizeof(kr.prefix));
	kr.prefixlen = rn->prefixlen;
	if (eigrp_addrisset(eigrp->af, &route->nexthop))
		memcpy(&kr.nexthop, &route->nexthop, sizeof(kr.nexthop));
	else
		memcpy(&kr.nexthop, &route->nbr->addr, sizeof(kr.nexthop));
	kr.ifindex = route->nbr->ei->iface->ifindex;
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else
		kr.priority = rdeconf->fib_priority_internal;

	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));

	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
}

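/*
 * Walk the instance's redistribute rules in configuration order and
 * return the first one matching the given kernel route, or NULL if the
 * route matches no rule or hits a negated (REDIST_NO) rule.
 */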
static struct redistribute *
eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
{
	struct redistribute	*r;
	uint8_t			 is_default = 0;
	union eigrpd_addr	 addr;

	/* only allow the default route via REDIST_DEFAULT */
	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
		is_default = 1;

	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
		switch (r->type & ~REDIST_NO) {
		case REDIST_STATIC:
			if (is_default)
				continue;
			if (kr->flags & F_STATIC)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_RIP:
			if (is_default)
				continue;
			if (kr->priority == RTP_RIP)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_OSPF:
			if (is_default)
				continue;
			if (kr->priority == RTP_OSPF)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_CONNECTED:
			if (is_default)
				continue;
			if (kr->flags & F_CONNECTED)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_ADDR:
			if (eigrp_addrisset(r->af, &r->addr) &&
			    r->prefixlen == 0) {
				if (is_default)
					return (r->type & REDIST_NO ? NULL : r);
				else
					return (NULL);
			}

			eigrp_applymask(kr->af, &addr, &kr->prefix,
			    r->prefixlen);
			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
			    kr->prefixlen >= r->prefixlen)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_DEFAULT:
			if (is_default)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		}
	}

	return (NULL);
}

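/*
 * Inject (or, with withdraw set, remove) a redistributed kernel route
 * into every instance of the matching address family.  The route is
 * turned into an external EIGRP route using the rule's metric or the
 * instance's default metric and run through DUAL as if learned from
 * the internal "redistribute" neighbor; a withdraw is signalled with
 * an infinite delay metric.
 */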
void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		memcpy(&ri.prefix, &kr->prefix, sizeof(ri.prefix));
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric */
		ri.emetric.routerid = htonl(eigrp_router_id(rdeconf));
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}

/* send all known routing information to new neighbor */
void
rt_snap(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct rinfo		 ri;

	RB_FOREACH(rn, rt_tree, &eigrp->topology)
		if (rn->state == DUAL_STA_PASSIVE) {
			rinfo_fill_successor(rn, &ri);
			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
			    nbr->peerid, 0, &ri, sizeof(ri));
		}

	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
	    NULL, 0);
}

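/*
 * Convert a route of a topology entry into the ctl_rt representation
 * sent over the control socket.  Returns a pointer to static storage
 * that is overwritten on each call.
 */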
struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	memcpy(&rtctl.prefix, &rn->prefix, sizeof(rtctl.prefix));
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	memcpy(&rtctl.nexthop, &route->nbr->addr, sizeof(rtctl.nexthop));
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	memcpy(&rtctl.emetric, &route->emetric, sizeof(rtctl.emetric));

	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}

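/*
 * Dump the topology of all instances to the control socket, honoring
 * the request's filters (prefix, prefix length, active-only and
 * all-links).  The first route reported for each destination carries
 * the F_CTL_RT_FIRST flag so the client can group its output.
 */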
void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}