/*	$OpenBSD: rde.c,v 1.9 2016/01/15 12:25:43 renato Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>

#include "eigrp.h"
#include "eigrpd.h"
#include "eigrpe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);

struct eigrpd_conf	*rdeconf = NULL, *nconf;
struct imsgev		*iev_eigrpe;
struct imsgev		*iev_main;

extern struct iface_id_head ifaces_by_id;
RB_PROTOTYPE(iface_id_head, eigrp_iface, id_tree, iface_id_compare)

RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)

extern struct rde_nbr_head rde_nbrs;
RB_PROTOTYPE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct eigrpd_conf *xconf, int pipe_parent2rde[2], int pipe_eigrpe2rde[2],
    int pipe_parent2eigrpe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;
	struct eigrp		*eigrp;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	eigrpd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_eigrpe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2eigrpe[0]);
	close(pipe_parent2eigrpe[1]);

	if ((iev_eigrpe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_eigrpe->ibuf, pipe_eigrpe2rde[1]);
	iev_eigrpe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_eigrpe->events = EV_READ;
	event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd, iev_eigrpe->events,
	    iev_eigrpe->handler, iev_eigrpe);
	event_add(&iev_eigrpe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		rde_instance_init(eigrp);

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

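/* free the running configuration and the imsg channels, then exit */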
void
rde_shutdown(void)
{
	config_clear(rdeconf);

	msgbuf_clear(&iev_eigrpe->ibuf.w);
	free(iev_eigrpe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);

	log_info("route decision engine exiting");
	_exit(0);
}

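/* compose an imsg to the parent (main) process */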
int
rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
{
	return (imsg_compose_event(iev_main, type, 0, pid, -1,
	    data, datalen));
}

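/* compose an imsg to the eigrpe process, addressed to the given peer */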
int
rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
    uint16_t datalen)
{
	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
	    data, datalen));
}

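/* dispatch imsgs received from the eigrpe process */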
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			rde_nbr_del(nbr, 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

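/* dispatch imsgs received from the parent process (ifaces, kroutes, reconf) */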
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	struct iface		*niface = NULL;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INIT(&nei->summary_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

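/* create the "self" neighbors used to inject redistributed and summary routes */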
void
rde_instance_init(struct eigrp *eigrp)
{
	struct rde_nbr		 nbr;

	memset(&nbr, 0, sizeof(nbr));
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_redist->eigrp = eigrp;
	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
	eigrp->rnbr_summary->eigrp = eigrp;
}

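/* remove all routes and neighbors belonging to an instance and free it */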
void
rde_instance_del(struct eigrp *eigrp)
{
	struct rde_nbr		*nbr, *safe;
	struct rt_node		*rn;

	/* clear topology */
	while ((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
		rt_del(rn);

	/* clear nbrs */
	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
		if (nbr->eigrp == eigrp)
			rde_nbr_del(nbr, 0);
	rde_nbr_del(eigrp->rnbr_redist, 0);
	rde_nbr_del(eigrp->rnbr_summary, 0);

	free(eigrp);
}

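/* ask the parent process to install or update this route in the kernel */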
void
rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;
	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	memcpy(&kr.prefix, &rn->prefix, sizeof(kr.prefix));
	kr.prefixlen = rn->prefixlen;
	if (eigrp_addrisset(eigrp->af, &route->nexthop))
		memcpy(&kr.nexthop, &route->nexthop, sizeof(kr.nexthop));
	else
		memcpy(&kr.nexthop, &route->nbr->addr, sizeof(kr.nexthop));
	if (route->nbr->ei)
		kr.ifindex = route->nbr->ei->iface->ifindex;
	else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			memcpy(&kr.nexthop.v6, &lo6, sizeof(kr.nexthop.v6));
			break;
		default:
			fatalx("rde_send_change_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));

	route->flags |= F_EIGRP_ROUTE_INSTALLED;
}

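/* ask the parent process to remove this route from the kernel */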
void
rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;
	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	memcpy(&kr.prefix, &rn->prefix, sizeof(kr.prefix));
	kr.prefixlen = rn->prefixlen;
	if (eigrp_addrisset(eigrp->af, &route->nexthop))
		memcpy(&kr.nexthop, &route->nexthop, sizeof(kr.nexthop));
	else
		memcpy(&kr.nexthop, &route->nbr->addr, sizeof(kr.nexthop));
	if (route->nbr->ei)
		kr.ifindex = route->nbr->ei->iface->ifindex;
	else {
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			memcpy(&kr.nexthop.v6, &lo6, sizeof(kr.nexthop.v6));
			break;
		default:
			fatalx("rde_send_delete_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));

	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
}

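/*
 * find the instance's redistribute rule matching the given kernel route,
 * or return NULL if the route should not be redistributed
 */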
static struct redistribute *
eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
{
	struct redistribute	*r;
	uint8_t			 is_default = 0;
	union eigrpd_addr	 addr;

	/* only allow the default route via REDIST_DEFAULT */
	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
		is_default = 1;

	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
		switch (r->type & ~REDIST_NO) {
		case REDIST_STATIC:
			if (is_default)
				continue;
			if (kr->flags & F_STATIC)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_RIP:
			if (is_default)
				continue;
			if (kr->priority == RTP_RIP)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_OSPF:
			if (is_default)
				continue;
			if (kr->priority == RTP_OSPF)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_CONNECTED:
			if (is_default)
				continue;
			if (kr->flags & F_CONNECTED)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_ADDR:
			if (eigrp_addrisset(r->af, &r->addr) &&
			    r->prefixlen == 0) {
				if (is_default)
					return (r->type & REDIST_NO ? NULL : r);
				else
					return (NULL);
			}

			eigrp_applymask(kr->af, &addr, &kr->prefix,
			    r->prefixlen);
			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
			    kr->prefixlen >= r->prefixlen)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		case REDIST_DEFAULT:
			if (is_default)
				return (r->type & REDIST_NO ? NULL : r);
			break;
		}
	}

	return (NULL);
}

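/*
 * announce or withdraw a redistributed kernel route as an external route
 * on every instance whose redistribute rules match it
 */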
void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		memcpy(&ri.prefix, &kr->prefix, sizeof(ri.prefix));
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric */
		ri.emetric.routerid = htonl(eigrp_router_id(rdeconf));
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}

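/* announce a summary route through the instance's summary self neighbor */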
void
rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
    struct classic_metric *metric)
{
	struct rinfo		 ri;

	memset(&ri, 0, sizeof(ri));
	ri.af = eigrp->af;
	ri.type = EIGRP_ROUTE_INTERNAL;
	memcpy(&ri.prefix, &summary->prefix, sizeof(ri.prefix));
	ri.prefixlen = summary->prefixlen;
	memcpy(&ri.metric, metric, sizeof(ri.metric));

	rde_check_update(eigrp->rnbr_summary, &ri);
}

/* send all known routing information to new neighbor */
void
rt_snap(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct rinfo		 ri;

	RB_FOREACH(rn, rt_tree, &eigrp->topology)
		if (rn->state == DUAL_STA_PASSIVE &&
		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
			rinfo_fill_successor(rn, &ri);
			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
			    nbr->peerid, 0, &ri, sizeof(ri));
		}

	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
	    NULL, 0);
}

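/* convert a route into the control message format used by "show topology" */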
struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	memcpy(&rtctl.prefix, &rn->prefix, sizeof(rtctl.prefix));
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	memcpy(&rtctl.nexthop, &route->nbr->addr, sizeof(rtctl.nexthop));
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	memcpy(&rtctl.emetric, &route->emetric, sizeof(rtctl.emetric));

	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}

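/* dump the topology table, applying the filters from the control request */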
void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}