xref: /openbsd/usr.sbin/eigrpd/rde.c (revision 02eb61ef)
1 /*	$OpenBSD: rde.c,v 1.12 2016/04/15 13:21:45 renato Exp $ */
2 
3 /*
4  * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
5  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
6  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
7  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <stdlib.h>
23 #include <unistd.h>
24 #include <arpa/inet.h>
25 #include <errno.h>
26 #include <signal.h>
27 #include <string.h>
28 #include <pwd.h>
29 
30 #include "eigrp.h"
31 #include "eigrpd.h"
32 #include "eigrpe.h"
33 #include "log.h"
34 #include "rde.h"
35 
36 void		 rde_sig_handler(int sig, short, void *);
37 void		 rde_shutdown(void);
38 void		 rde_dispatch_imsg(int, short, void *);
39 void		 rde_dispatch_parent(int, short, void *);
40 
41 struct eigrpd_conf	*rdeconf = NULL, *nconf;
42 struct imsgev		*iev_eigrpe;
43 struct imsgev		*iev_main;
44 
45 extern struct iface_id_head ifaces_by_id;
46 RB_PROTOTYPE(iface_id_head, eigrp_iface, id_tree, iface_id_compare)
47 
48 RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)
49 
50 extern struct rde_nbr_head rde_nbrs;
51 RB_PROTOTYPE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)
52 
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * No async-signal-safety concerns here: libevent delivers
	 * signals from the event loop, not from signal context.
	 */
	switch (sig) {
	case SIGTERM:
	case SIGINT:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}
70 
/*
 * route decision engine
 *
 * Fork the RDE child process.  The parent returns immediately with the
 * child's pid; the child chroots, drops privileges, pledges "stdio",
 * wires up the imsg pipes to the parent and to the eigrpe process, and
 * enters the event loop.  The child only leaves via rde_shutdown().
 */
pid_t
rde(struct eigrpd_conf *xconf, int pipe_parent2rde[2], int pipe_eigrpe2rde[2],
    int pipe_parent2eigrpe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;
	struct eigrp		*eigrp;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		/* child: fall through and become the RDE */
		break;
	default:
		/* parent: hand the pid back to the caller */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	/* confine the process to the unprivileged user's home directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	eigrpd_process = PROC_RDE_ENGINE;

	/* drop privileges; must happen after chroot(2) above */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	/* the RDE needs nothing beyond the already-open imsg pipes */
	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_eigrpe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2eigrpe[0]);
	close(pipe_parent2eigrpe[1]);

	if ((iev_eigrpe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_eigrpe->ibuf, pipe_eigrpe2rde[1]);
	iev_eigrpe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_eigrpe->events = EV_READ;
	event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd, iev_eigrpe->events,
	    iev_eigrpe->handler, iev_eigrpe);
	event_add(&iev_eigrpe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	/* record the daemon's start time */
	gettimeofday(&now, NULL);
	global.uptime = now.tv_sec;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		rde_instance_init(eigrp);

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
161 
/* tear down the RDE process; never returns */
void
rde_shutdown(void)
{
	/* release the running configuration */
	config_clear(rdeconf);

	/* flush and free both imsg channels */
	msgbuf_clear(&iev_eigrpe->ibuf.w);
	free(iev_eigrpe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);

	log_info("route decision engine exiting");
	_exit(0);
}
175 
/*
 * Convenience wrapper: compose an imsg of the given type on the pipe
 * to the parent process (no fd, peerid 0).  Returns the result of
 * imsg_compose_event().
 */
int
rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
{
	return (imsg_compose_event(iev_main, type, 0, pid, -1,
	    data, datalen));
}
182 
/*
 * Convenience wrapper: compose an imsg of the given type on the pipe
 * to the eigrpe process (no fd).  Returns the result of
 * imsg_compose_event().
 */
int
rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
    uint16_t datalen)
{
	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
	    data, datalen));
}
190 
191 /* ARGSUSED */
192 void
193 rde_dispatch_imsg(int fd, short event, void *bula)
194 {
195 	struct imsgev		*iev = bula;
196 	struct imsgbuf		*ibuf;
197 	struct imsg		 imsg;
198 	struct rde_nbr		*nbr;
199 	struct rde_nbr		 new;
200 	struct rinfo		 rinfo;
201 	ssize_t			 n;
202 	int			 shut = 0, verbose;
203 
204 	ibuf = &iev->ibuf;
205 
206 	if (event & EV_READ) {
207 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
208 			fatal("imsg_read error");
209 		if (n == 0)	/* connection closed */
210 			shut = 1;
211 	}
212 	if (event & EV_WRITE) {
213 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
214 			fatal("msgbuf_write");
215 		if (n == 0)	/* connection closed */
216 			shut = 1;
217 	}
218 
219 	for (;;) {
220 		if ((n = imsg_get(ibuf, &imsg)) == -1)
221 			fatal("rde_dispatch_imsg: imsg_get error");
222 		if (n == 0)
223 			break;
224 
225 		switch (imsg.hdr.type) {
226 		case IMSG_NEIGHBOR_UP:
227 			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
228 			    sizeof(struct rde_nbr))
229 				fatalx("invalid size of neighbor request");
230 			memcpy(&new, imsg.data, sizeof(new));
231 
232 			if (rde_nbr_find(imsg.hdr.peerid))
233 				fatalx("rde_dispatch_imsg: "
234 				    "neighbor already exists");
235 			rde_nbr_new(imsg.hdr.peerid, &new);
236 			break;
237 		case IMSG_NEIGHBOR_DOWN:
238 			nbr = rde_nbr_find(imsg.hdr.peerid);
239 			if (nbr == NULL) {
240 				log_debug("%s: cannot find rde neighbor",
241 				    __func__);
242 				break;
243 			}
244 
245 			rde_check_link_down_nbr(nbr);
246 			rde_flush_queries();
247 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
248 			break;
249 		case IMSG_RECV_UPDATE_INIT:
250 			nbr = rde_nbr_find(imsg.hdr.peerid);
251 			if (nbr == NULL) {
252 				log_debug("%s: cannot find rde neighbor",
253 				    __func__);
254 				break;
255 			}
256 
257 			rt_snap(nbr);
258 			break;
259 		case IMSG_RECV_UPDATE:
260 		case IMSG_RECV_QUERY:
261 		case IMSG_RECV_REPLY:
262 		case IMSG_RECV_SIAQUERY:
263 		case IMSG_RECV_SIAREPLY:
264 			nbr = rde_nbr_find(imsg.hdr.peerid);
265 			if (nbr == NULL) {
266 				log_debug("%s: cannot find rde neighbor",
267 				    __func__);
268 				break;
269 			}
270 
271 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
272 				fatalx("invalid size of rinfo");
273 			memcpy(&rinfo, imsg.data, sizeof(rinfo));
274 
275 			switch (imsg.hdr.type) {
276 			case IMSG_RECV_UPDATE:
277 				rde_check_update(nbr, &rinfo);
278 				break;
279 			case IMSG_RECV_QUERY:
280 				rde_check_query(nbr, &rinfo, 0);
281 				break;
282 			case IMSG_RECV_REPLY:
283 				rde_check_reply(nbr, &rinfo, 0);
284 				break;
285 			case IMSG_RECV_SIAQUERY:
286 				rde_check_query(nbr, &rinfo, 1);
287 				break;
288 			case IMSG_RECV_SIAREPLY:
289 				rde_check_reply(nbr, &rinfo, 1);
290 				break;
291 			}
292 			break;
293 		case IMSG_CTL_SHOW_TOPOLOGY:
294 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
295 			    sizeof(struct ctl_show_topology_req)) {
296 				log_warnx("%s: wrong imsg len", __func__);
297 				break;
298 			}
299 
300 			rt_dump(imsg.data, imsg.hdr.pid);
301 			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
302 			    NULL, 0);
303 			break;
304 		case IMSG_CTL_LOG_VERBOSE:
305 			/* already checked by eigrpe */
306 			memcpy(&verbose, imsg.data, sizeof(verbose));
307 			log_verbose(verbose);
308 			break;
309 		default:
310 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
311 			    imsg.hdr.type);
312 			break;
313 		}
314 		imsg_free(&imsg);
315 	}
316 	if (!shut)
317 		imsg_event_add(iev);
318 	else {
319 		/* this pipe is dead, so remove the event handler */
320 		event_del(&iev->ev);
321 		event_loopexit(NULL);
322 	}
323 }
324 
325 /* ARGSUSED */
326 void
327 rde_dispatch_parent(int fd, short event, void *bula)
328 {
329 	struct iface		*niface = NULL;
330 	static struct eigrp	*neigrp;
331 	struct eigrp_iface	*nei;
332 	struct imsg		 imsg;
333 	struct imsgev		*iev = bula;
334 	struct imsgbuf		*ibuf;
335 	struct kif		*kif;
336 	ssize_t			 n;
337 	int			 shut = 0;
338 
339 	ibuf = &iev->ibuf;
340 
341 	if (event & EV_READ) {
342 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
343 			fatal("imsg_read error");
344 		if (n == 0)	/* connection closed */
345 			shut = 1;
346 	}
347 	if (event & EV_WRITE) {
348 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
349 			fatal("msgbuf_write");
350 		if (n == 0)	/* connection closed */
351 			shut = 1;
352 	}
353 
354 	for (;;) {
355 		if ((n = imsg_get(ibuf, &imsg)) == -1)
356 			fatal("rde_dispatch_parent: imsg_get error");
357 		if (n == 0)
358 			break;
359 
360 		switch (imsg.hdr.type) {
361 		case IMSG_IFDOWN:
362 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
363 			    sizeof(struct kif))
364 				fatalx("IFDOWN imsg with wrong len");
365 			kif = imsg.data;
366 			rde_check_link_down(kif->ifindex);
367 			break;
368 		case IMSG_NETWORK_ADD:
369 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
370 			    sizeof(struct kroute))
371 				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
372 			rt_redist_set(imsg.data, 0);
373 			break;
374 		case IMSG_NETWORK_DEL:
375 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
376 			    sizeof(struct kroute))
377 				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
378 			rt_redist_set(imsg.data, 1);
379 			break;
380 		case IMSG_RECONF_CONF:
381 			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
382 			    NULL)
383 				fatal(NULL);
384 			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));
385 
386 			TAILQ_INIT(&nconf->iface_list);
387 			TAILQ_INIT(&nconf->instances);
388 			break;
389 		case IMSG_RECONF_INSTANCE:
390 			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
391 				fatal(NULL);
392 			memcpy(neigrp, imsg.data, sizeof(struct eigrp));
393 
394 			SIMPLEQ_INIT(&neigrp->redist_list);
395 			TAILQ_INIT(&neigrp->ei_list);
396 			RB_INIT(&neigrp->nbrs);
397 			RB_INIT(&neigrp->topology);
398 			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
399 			break;
400 		case IMSG_RECONF_IFACE:
401 			niface = imsg.data;
402 			niface = if_lookup(nconf, niface->ifindex);
403 			if (niface)
404 				break;
405 
406 			if ((niface = malloc(sizeof(struct iface))) == NULL)
407 				fatal(NULL);
408 			memcpy(niface, imsg.data, sizeof(struct iface));
409 
410 			TAILQ_INIT(&niface->addr_list);
411 			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
412 			break;
413 		case IMSG_RECONF_EIGRP_IFACE:
414 			if (niface == NULL)
415 				break;
416 			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
417 				fatal(NULL);
418 			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));
419 
420 			nei->iface = niface;
421 			nei->eigrp = neigrp;
422 			TAILQ_INIT(&nei->nbr_list);
423 			TAILQ_INIT(&nei->update_list);
424 			TAILQ_INIT(&nei->query_list);
425 			TAILQ_INIT(&nei->summary_list);
426 			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
427 			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
428 			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
429 			    NULL)
430 				fatalx("rde_dispatch_parent: "
431 				    "RB_INSERT(ifaces_by_id) failed");
432 			break;
433 		case IMSG_RECONF_END:
434 			merge_config(rdeconf, nconf);
435 			nconf = NULL;
436 			break;
437 		default:
438 			log_debug("%s: unexpected imsg %d", __func__,
439 			    imsg.hdr.type);
440 			break;
441 		}
442 		imsg_free(&imsg);
443 	}
444 	if (!shut)
445 		imsg_event_add(iev);
446 	else {
447 		/* this pipe is dead, so remove the event handler */
448 		event_del(&iev->ev);
449 		event_loopexit(NULL);
450 	}
451 }
452 
453 void
454 rde_instance_init(struct eigrp *eigrp)
455 {
456 	struct rde_nbr		 nbr;
457 
458 	memset(&nbr, 0, sizeof(nbr));
459 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
460 	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
461 	eigrp->rnbr_redist->eigrp = eigrp;
462 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
463 	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
464 	eigrp->rnbr_summary->eigrp = eigrp;
465 }
466 
467 void
468 rde_instance_del(struct eigrp *eigrp)
469 {
470 	struct rde_nbr		*nbr, *safe;
471 	struct rt_node		*rn;
472 
473 	/* clear topology */
474 	while((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
475 		rt_del(rn);
476 
477 	/* clear nbrs */
478 	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
479 		if (nbr->eigrp == eigrp)
480 			rde_nbr_del(nbr, 0);
481 	rde_nbr_del(eigrp->rnbr_redist, 0);
482 	rde_nbr_del(eigrp->rnbr_summary, 0);
483 
484 	free(eigrp);
485 }
486 
487 void
488 rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
489 {
490 	struct eigrp	*eigrp = route->nbr->eigrp;
491 	struct kroute	 kr;
492 	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;
493 
494 	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
495 	    log_addr(eigrp->af, &route->nbr->addr));
496 
497 	memset(&kr, 0, sizeof(kr));
498 	kr.af = eigrp->af;
499 	kr.prefix = rn->prefix;
500 	kr.prefixlen = rn->prefixlen;
501 	if (route->nbr->ei) {
502 		kr.nexthop = route->nexthop;
503 		kr.ifindex = route->nbr->ei->iface->ifindex;
504 	} else {
505 		switch (eigrp->af) {
506 		case AF_INET:
507 			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
508 			break;
509 		case AF_INET6:
510 			kr.nexthop.v6 = lo6;
511 			break;
512 		default:
513 			fatalx("rde_send_delete_kroute: unknown af");
514 			break;
515 		}
516 		kr.flags = F_BLACKHOLE;
517 	}
518 	if (route->type == EIGRP_ROUTE_EXTERNAL)
519 		kr.priority = rdeconf->fib_priority_external;
520 	else {
521 		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
522 			kr.priority = rdeconf->fib_priority_summary;
523 		else
524 			kr.priority = rdeconf->fib_priority_internal;
525 	}
526 
527 	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));
528 
529 	route->flags |= F_EIGRP_ROUTE_INSTALLED;
530 }
531 
532 void
533 rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
534 {
535 	struct eigrp	*eigrp = route->nbr->eigrp;
536 	struct kroute	 kr;
537 	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;
538 
539 	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
540 	    log_addr(eigrp->af, &route->nbr->addr));
541 
542 	memset(&kr, 0, sizeof(kr));
543 	kr.af = eigrp->af;
544 	kr.prefix = rn->prefix;
545 	kr.prefixlen = rn->prefixlen;
546 	if (route->nbr->ei) {
547 		kr.nexthop = route->nexthop;
548 		kr.ifindex = route->nbr->ei->iface->ifindex;
549 	} else {
550 		switch (eigrp->af) {
551 		case AF_INET:
552 			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
553 			break;
554 		case AF_INET6:
555 			kr.nexthop.v6 = lo6;
556 			break;
557 		default:
558 			fatalx("rde_send_delete_kroute: unknown af");
559 			break;
560 		}
561 		kr.flags = F_BLACKHOLE;
562 	}
563 	if (route->type == EIGRP_ROUTE_EXTERNAL)
564 		kr.priority = rdeconf->fib_priority_external;
565 	else {
566 		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
567 			kr.priority = rdeconf->fib_priority_summary;
568 		else
569 			kr.priority = rdeconf->fib_priority_internal;
570 	}
571 
572 	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));
573 
574 	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
575 }
576 
577 static struct redistribute *
578 eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
579 {
580 	struct redistribute	*r;
581 	uint8_t			 is_default = 0;
582 	union eigrpd_addr	 addr;
583 
584 	/* only allow the default route via REDIST_DEFAULT */
585 	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
586 		is_default = 1;
587 
588 	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
589 		switch (r->type & ~REDIST_NO) {
590 		case REDIST_STATIC:
591 			if (is_default)
592 				continue;
593 			if (kr->flags & F_STATIC)
594 				return (r->type & REDIST_NO ? NULL : r);
595 			break;
596 		case REDIST_RIP:
597 			if (is_default)
598 				continue;
599 			if (kr->priority == RTP_RIP)
600 				return (r->type & REDIST_NO ? NULL : r);
601 			break;
602 		case REDIST_OSPF:
603 			if (is_default)
604 				continue;
605 			if (kr->priority == RTP_OSPF)
606 				return (r->type & REDIST_NO ? NULL : r);
607 			break;
608 		case REDIST_CONNECTED:
609 			if (is_default)
610 				continue;
611 			if (kr->flags & F_CONNECTED)
612 				return (r->type & REDIST_NO ? NULL : r);
613 			break;
614 		case REDIST_ADDR:
615 			if (eigrp_addrisset(r->af, &r->addr) &&
616 			    r->prefixlen == 0) {
617 				if (is_default)
618 					return (r->type & REDIST_NO ? NULL : r);
619 				else
620 					return (0);
621 			}
622 
623 			eigrp_applymask(kr->af, &addr, &kr->prefix,
624 			    r->prefixlen);
625 			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
626 			    kr->prefixlen >= r->prefixlen)
627 				return (r->type & REDIST_NO ? NULL : r);
628 			break;
629 		case REDIST_DEFAULT:
630 			if (is_default)
631 				return (r->type & REDIST_NO ? NULL : r);
632 			break;
633 		}
634 	}
635 
636 	return (NULL);
637 }
638 
/*
 * Inject (withdraw != 0: withdraw) a kernel route into every EIGRP
 * instance whose redistribute rules accept it, as an external route
 * originated by the instance's redistribute self-neighbor.
 */
void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		/* per-rule metric wins over the instance default metric */
		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		ri.prefix = kr->prefix;
		ri.prefixlen = kr->prefixlen;

		/* metric; withdrawals are signalled by an infinite delay */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric: record where the route came from */
		ri.emetric.routerid = htonl(eigrp_router_id(rdeconf));
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		/* run the update through DUAL as if the self-nbr sent it */
		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}
702 
703 void
704 rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
705     struct classic_metric *metric)
706 {
707 	struct rinfo		 ri;
708 
709 	memset(&ri, 0, sizeof(ri));
710 	ri.af = eigrp->af;
711 	ri.type = EIGRP_ROUTE_INTERNAL;
712 	ri.prefix = summary->prefix;
713 	ri.prefixlen = summary->prefixlen;
714 	ri.metric = *metric;
715 
716 	rde_check_update(eigrp->rnbr_summary, &ri);
717 }
718 
719 /* send all known routing information to new neighbor */
720 void
721 rt_snap(struct rde_nbr *nbr)
722 {
723 	struct eigrp		*eigrp = nbr->eigrp;
724 	struct rt_node		*rn;
725 	struct rinfo		 ri;
726 
727 	RB_FOREACH(rn, rt_tree, &eigrp->topology)
728 		if (rn->state == DUAL_STA_PASSIVE &&
729 		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
730 			rinfo_fill_successor(rn, &ri);
731 			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
732 			    nbr->peerid, 0, &ri, sizeof(ri));
733 		}
734 
735 	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
736 	    NULL, 0);
737 }
738 
/*
 * Translate a topology entry/route pair into the wire format used by
 * eigrpctl.  Returns a pointer to a static buffer that is overwritten
 * on every call — callers must copy or send it before calling again.
 */
struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	rtctl.prefix = rn->prefix;
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	rtctl.nexthop = route->nexthop;
	/* self-neighbors have no interface; label them instead */
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	rtctl.emetric = route->emetric;

	/* flag the successor and any feasible successors */
	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}
781 
/*
 * Dump the topology of every instance to eigrpctl (via eigrpe), applying
 * the filters from the request: an optional exact prefix/prefixlen, an
 * active-routes-only flag, and unless F_CTL_ALLLINKS is set only routes
 * that satisfy the feasibility condition.
 */
void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			/* filter on the requested prefix, if one was given */
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			/* mark the first route reported for this prefix */
			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}
822