/*	$OpenBSD: rde.c,v 1.13 2016/04/15 13:27:58 renato Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>

#include "eigrp.h"
#include "eigrpd.h"
#include "eigrpe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);

struct eigrpd_conf	*rdeconf = NULL, *nconf;
struct imsgev		*iev_eigrpe;
struct imsgev		*iev_main;

extern struct iface_id_head ifaces_by_id;
RB_PROTOTYPE(iface_id_head, eigrp_iface, id_tree, iface_id_compare)

RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)

extern struct rde_nbr_head rde_nbrs;
RB_PROTOTYPE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct eigrpd_conf *xconf, int pipe_parent2rde[2], int pipe_eigrpe2rde[2],
    int pipe_parent2eigrpe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;
	struct eigrp		*eigrp;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	eigrpd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_eigrpe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2eigrpe[0]);
	close(pipe_parent2eigrpe[1]);

	if ((iev_eigrpe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_eigrpe->ibuf, pipe_eigrpe2rde[1]);
	iev_eigrpe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_eigrpe->events = EV_READ;
	event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd, iev_eigrpe->events,
	    iev_eigrpe->handler, iev_eigrpe);
	event_add(&iev_eigrpe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	gettimeofday(&now, NULL);
	global.uptime = now.tv_sec;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		rde_instance_init(eigrp);

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

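/* free resources and terminate the route decision engine */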
void
rde_shutdown(void)
{
	config_clear(rdeconf);

	msgbuf_clear(&iev_eigrpe->ibuf.w);
	free(iev_eigrpe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);

	log_info("route decision engine exiting");
	_exit(0);
}

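/* compose an imsg to the parent (main) process */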
int
rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
{
	return (imsg_compose_event(iev_main, type, 0, pid, -1,
	    data, datalen));
}

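/* compose an imsg to the eigrpe process */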
int
rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
    uint16_t datalen)
{
	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
	    data, datalen));
}

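/* handle imsgs received from the eigrpe process */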
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

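/* handle imsgs received from the parent process */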
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	struct iface		*niface = NULL;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->ei_list);
			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INIT(&nei->summary_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

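/* create the pseudo self-neighbors used for redistributed and summary routes */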
void
rde_instance_init(struct eigrp *eigrp)
{
	struct rde_nbr		 nbr;

	memset(&nbr, 0, sizeof(nbr));
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_redist->eigrp = eigrp;
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_summary->eigrp = eigrp;
}

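/* free an instance's topology, neighbors and self-neighbors */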
void
rde_instance_del(struct eigrp *eigrp)
{
	struct rde_nbr		*nbr, *safe;
	struct rt_node		*rn;

	/* clear topology */
	while ((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
		rt_del(rn);

	/* clear nbrs */
	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
		if (nbr->eigrp == eigrp)
			rde_nbr_del(nbr, 0);
	rde_nbr_del(eigrp->rnbr_redist, 0);
	rde_nbr_del(eigrp->rnbr_summary, 0);

	free(eigrp);
}

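/* ask the parent process to install or change the kernel route for this path */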
void
rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;
	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	kr.prefix = rn->prefix;
	kr.prefixlen = rn->prefixlen;
	if (route->nbr->ei) {
		kr.nexthop = route->nexthop;
		kr.ifindex = route->nbr->ei->iface->ifindex;
	} else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			kr.nexthop.v6 = lo6;
			break;
		default:
			fatalx("rde_send_change_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));

	route->flags |= F_EIGRP_ROUTE_INSTALLED;
}

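/* ask the parent process to remove the kernel route for this path */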
void
rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;
	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	kr.prefix = rn->prefix;
	kr.prefixlen = rn->prefixlen;
	if (route->nbr->ei) {
		kr.nexthop = route->nexthop;
		kr.ifindex = route->nbr->ei->iface->ifindex;
	} else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			kr.nexthop.v6 = lo6;
			break;
		default:
			fatalx("rde_send_delete_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));

	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
}

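/* find the redistribute rule, if any, that allows this kroute to be redistributed */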
static struct redistribute *
eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
{
	struct redistribute	*r;
	uint8_t			 is_default = 0;
	union eigrpd_addr	 addr;

	/* only allow the default route via REDIST_DEFAULT */
	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
		is_default = 1;

	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
		switch (r->type & ~REDIST_NO) {
		case REDIST_STATIC:
			if (is_default)
				continue;
			if (kr->flags & F_STATIC)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_RIP:
			if (is_default)
				continue;
			if (kr->priority == RTP_RIP)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_OSPF:
			if (is_default)
				continue;
			if (kr->priority == RTP_OSPF)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_CONNECTED:
			if (is_default)
				continue;
			if (kr->flags & F_CONNECTED)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_ADDR:
			if (eigrp_addrisset(r->af, &r->addr) &&
			    r->prefixlen == 0) {
				if (is_default)
					return (r->type & REDIST_NO ? NULL : r);
				else
					return (NULL);
			}

			eigrp_applymask(kr->af, &addr, &kr->prefix,
			    r->prefixlen);
			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
			    kr->prefixlen >= r->prefixlen)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_DEFAULT:
			if (is_default)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		}
	}

	return (NULL);
}

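/* announce or withdraw a redistributed kernel route on every matching instance */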
void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		ri.prefix = kr->prefix;
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric */
		ri.emetric.routerid = htonl(eigrp_router_id(rdeconf));
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}

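/* inject a summary route into an instance's topology */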
void
rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
    struct classic_metric *metric)
{
	struct rinfo		 ri;

	memset(&ri, 0, sizeof(ri));
	ri.af = eigrp->af;
	ri.type = EIGRP_ROUTE_INTERNAL;
	ri.prefix = summary->prefix;
	ri.prefixlen = summary->prefixlen;
	ri.metric = *metric;

	rde_check_update(eigrp->rnbr_summary, &ri);
}

/* send all known routing information to new neighbor */
void
rt_snap(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct rinfo		 ri;

	RB_FOREACH(rn, rt_tree, &eigrp->topology)
		if (rn->state == DUAL_STA_PASSIVE &&
		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
			rinfo_fill_successor(rn, &ri);
			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
			    nbr->peerid, 0, &ri, sizeof(ri));
		}

	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
	    NULL, 0);
}

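/* convert a topology entry into the control message format */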
struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	rtctl.prefix = rn->prefix;
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	rtctl.nexthop = route->nexthop;
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	rtctl.emetric = route->emetric;

	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}

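/* dump the topology table in response to a 'show topology' control request */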
void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}