xref: /openbsd/usr.sbin/eigrpd/rde.c (revision 95e9ede3)
1 /*	$OpenBSD: rde.c,v 1.26 2023/12/14 11:09:56 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
5  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
6  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
7  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/types.h>
23 #include <sys/socket.h>
24 #include <net/route.h>
25 
26 #include <errno.h>
27 #include <pwd.h>
28 #include <signal.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <unistd.h>
32 
33 #include "eigrpd.h"
34 #include "eigrpe.h"
35 #include "rde.h"
36 #include "log.h"
37 
38 static void		 rde_sig_handler(int sig, short, void *);
39 static __dead void	 rde_shutdown(void);
40 static void		 rde_dispatch_imsg(int, short, void *);
41 static void		 rde_dispatch_parent(int, short, void *);
42 static struct redistribute *eigrp_redistribute(struct eigrp *, struct kroute *);
43 static void		 rt_redist_set(struct kroute *, int);
44 static void		 rt_snap(struct rde_nbr *);
45 static struct ctl_rt	*rt_to_ctl(struct rt_node *, struct eigrp_route *);
46 static void		 rt_dump(struct ctl_show_topology_req *, pid_t);
47 
48 struct eigrpd_conf	*rdeconf;
49 
50 static struct imsgev	*iev_eigrpe;
51 static struct imsgev	*iev_main;
52 
static void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * Normal signal handler rules don't apply here because
	 * libevent decouples signal delivery for us.
	 */

	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}

	fatalx("unexpected signal");
}
69 
/*
 * Route decision engine entry point: drop privileges, chroot into the
 * eigrpd user's home directory, pledge, and run the libevent loop that
 * services the pipe to the parent process (and, later, to eigrpe).
 * Never returns normally; rde_shutdown() exits the process.
 */
void
rde(int debug, int verbose)
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;

	rdeconf = config_new_empty();

	log_init(debug);
	log_verbose(verbose);

	if ((pw = getpwnam(EIGRPD_USER)) == NULL)
		fatal("getpwnam");

	/* confine the process to the unprivileged user's chroot */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	log_procname = "rde";

	/* drop group privileges before user privileges */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	/* "recvfd" is needed to receive the eigrpe socket over imsg */
	if (pledge("stdio recvfd", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipe and event handler to the parent process (inherited fd 3) */
	if ((iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_main->ibuf, 3);
	iev_main->handler = rde_dispatch_parent;
	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	/* record process start time for "show" output */
	gettimeofday(&now, NULL);
	global.uptime = now.tv_sec;

	event_dispatch();

	/* event loop exited (pipe died or loopexit) - clean up and exit */
	rde_shutdown();
}
129 
/*
 * Tear down the RDE process: release both imsg channels, free the
 * running configuration and exit(0).  Marked __dead - never returns.
 */
static __dead void
rde_shutdown(void)
{
	/* close pipes */
	msgbuf_clear(&iev_eigrpe->ibuf.w);
	close(iev_eigrpe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	/* free instances, interfaces and neighbors held by the config */
	config_clear(rdeconf, PROC_RDE_ENGINE);

	free(iev_eigrpe);
	free(iev_main);

	log_info("route decision engine exiting");
	exit(0);
}
147 
148 int
rde_imsg_compose_parent(int type,pid_t pid,void * data,uint16_t datalen)149 rde_imsg_compose_parent(int type, pid_t pid, void *data, uint16_t datalen)
150 {
151 	return (imsg_compose_event(iev_main, type, 0, pid, -1,
152 	    data, datalen));
153 }
154 
155 int
rde_imsg_compose_eigrpe(int type,uint32_t peerid,pid_t pid,void * data,uint16_t datalen)156 rde_imsg_compose_eigrpe(int type, uint32_t peerid, pid_t pid, void *data,
157     uint16_t datalen)
158 {
159 	return (imsg_compose_event(iev_eigrpe, type, peerid, pid, -1,
160 	    data, datalen));
161 }
162 
/*
 * Event handler for the pipe to eigrpe: drain/refill the imsg buffers,
 * then process every queued message - neighbor lifecycle, received
 * routing information (updates/queries/replies) and relayed eigrpctl
 * requests.  On EOF the event is removed and the event loop is told
 * to exit, which leads to rde_shutdown().
 */
static void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct rde_nbr		*nbr;
	struct rde_nbr		 new;
	struct rinfo		 rinfo;
	ssize_t			 n;
	int			 shut = 0, verbose;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* process every complete imsg currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			/* payload is a template rde_nbr; peerid keys the nbr */
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_nbr))
				fatalx("invalid size of neighbor request");
			memcpy(&new, imsg.data, sizeof(new));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &new);
			break;
		case IMSG_NEIGHBOR_DOWN:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			/* treat all routes through this nbr as lost */
			rde_check_link_down_nbr(nbr);
			rde_flush_queries();
			/* NOTE(review): second lookup is redundant; nbr would do */
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid), 0);
			break;
		case IMSG_RECV_UPDATE_INIT:
			/* new adjacency: send a full topology snapshot */
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			rt_snap(nbr);
			break;
		case IMSG_RECV_UPDATE:
		case IMSG_RECV_QUERY:
		case IMSG_RECV_REPLY:
		case IMSG_RECV_SIAQUERY:
		case IMSG_RECV_SIAREPLY:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL) {
				log_debug("%s: cannot find rde neighbor",
				    __func__);
				break;
			}

			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rinfo))
				fatalx("invalid size of rinfo");
			memcpy(&rinfo, imsg.data, sizeof(rinfo));

			/* feed the route info into the DUAL state machine;
			 * third argument of check_query/reply is the SIA flag */
			switch (imsg.hdr.type) {
			case IMSG_RECV_UPDATE:
				rde_check_update(nbr, &rinfo);
				break;
			case IMSG_RECV_QUERY:
				rde_check_query(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_REPLY:
				rde_check_reply(nbr, &rinfo, 0);
				break;
			case IMSG_RECV_SIAQUERY:
				rde_check_query(nbr, &rinfo, 1);
				break;
			case IMSG_RECV_SIAREPLY:
				rde_check_reply(nbr, &rinfo, 1);
				break;
			}
			break;
		case IMSG_CTL_SHOW_TOPOLOGY:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ctl_show_topology_req)) {
				log_warnx("%s: wrong imsg len", __func__);
				break;
			}

			/* reply is streamed back to eigrpctl via eigrpe */
			rt_dump(imsg.data, imsg.hdr.pid);
			rde_imsg_compose_eigrpe(IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by eigrpe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
295 
/*
 * Event handler for the pipe to the parent process: kernel interface
 * and route events, the one-time eigrpe socket handoff, and the staged
 * reconfiguration protocol.  The static pointers nconf/niface/neigrp
 * accumulate state across successive IMSG_RECONF_* messages until
 * IMSG_RECONF_END merges the new config into rdeconf.
 */
static void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct eigrpd_conf *nconf;
	static struct iface	*niface;
	static struct eigrp	*neigrp;
	struct eigrp_iface	*nei;
	struct imsg		 imsg;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct kif		*kif;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* process every complete imsg currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_IFDOWN:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kif))
				fatalx("IFDOWN imsg with wrong len");
			kif = imsg.data;
			rde_check_link_down(kif->ifindex);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_ADD imsg with wrong len");
			rt_redist_set(imsg.data, 0);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct kroute))
				fatalx("IMSG_NETWORK_DEL imsg with wrong len");
			rt_redist_set(imsg.data, 1);
			break;
		case IMSG_SOCKET_IPC:
			/* one-time handoff of the socket to eigrpe */
			if (iev_eigrpe) {
				log_warnx("%s: received unexpected imsg fd "
				    "to eigrpe", __func__);
				break;
			}
			if ((fd = imsg_get_fd(&imsg)) == -1) {
				log_warnx("%s: expected to receive imsg fd to "
				    "eigrpe but didn't receive any", __func__);
				break;
			}

			iev_eigrpe = malloc(sizeof(struct imsgev));
			if (iev_eigrpe == NULL)
				fatal(NULL);
			imsg_init(&iev_eigrpe->ibuf, fd);
			iev_eigrpe->handler = rde_dispatch_imsg;
			iev_eigrpe->events = EV_READ;
			event_set(&iev_eigrpe->ev, iev_eigrpe->ibuf.fd,
			    iev_eigrpe->events, iev_eigrpe->handler,
			    iev_eigrpe);
			event_add(&iev_eigrpe->ev, NULL);
			break;
		case IMSG_RECONF_CONF:
			/* start of a reconfiguration transaction */
			if ((nconf = malloc(sizeof(struct eigrpd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct eigrpd_conf));

			/* list heads from the other process are meaningless */
			TAILQ_INIT(&nconf->iface_list);
			TAILQ_INIT(&nconf->instances);
			break;
		case IMSG_RECONF_INSTANCE:
			if ((neigrp = malloc(sizeof(struct eigrp))) == NULL)
				fatal(NULL);
			memcpy(neigrp, imsg.data, sizeof(struct eigrp));

			SIMPLEQ_INIT(&neigrp->redist_list);
			TAILQ_INIT(&neigrp->ei_list);
			RB_INIT(&neigrp->nbrs);
			RB_INIT(&neigrp->topology);
			TAILQ_INSERT_TAIL(&nconf->instances, neigrp, entry);
			break;
		case IMSG_RECONF_IFACE:
			/* skip interfaces already present in the new config */
			niface = imsg.data;
			niface = if_lookup(nconf, niface->ifindex);
			if (niface)
				break;

			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			TAILQ_INIT(&niface->ei_list);
			TAILQ_INIT(&niface->addr_list);
			TAILQ_INSERT_TAIL(&nconf->iface_list, niface, entry);
			break;
		case IMSG_RECONF_EIGRP_IFACE:
			/* belongs to the most recent RECONF_IFACE/INSTANCE */
			if (niface == NULL)
				break;
			if ((nei = malloc(sizeof(struct eigrp_iface))) == NULL)
				fatal(NULL);
			memcpy(nei, imsg.data, sizeof(struct eigrp_iface));

			nei->iface = niface;
			nei->eigrp = neigrp;
			TAILQ_INIT(&nei->nbr_list);
			TAILQ_INIT(&nei->update_list);
			TAILQ_INIT(&nei->query_list);
			TAILQ_INIT(&nei->summary_list);
			TAILQ_INSERT_TAIL(&niface->ei_list, nei, i_entry);
			TAILQ_INSERT_TAIL(&neigrp->ei_list, nei, e_entry);
			if (RB_INSERT(iface_id_head, &ifaces_by_id, nei) !=
			    NULL)
				fatalx("rde_dispatch_parent: "
				    "RB_INSERT(ifaces_by_id) failed");
			break;
		case IMSG_RECONF_END:
			/* commit: swap the staged config into rdeconf */
			merge_config(rdeconf, nconf, PROC_RDE_ENGINE);
			nconf = NULL;
			break;
		default:
			log_debug("%s: unexpected imsg %d", __func__,
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
447 
448 void
rde_instance_init(struct eigrp * eigrp)449 rde_instance_init(struct eigrp *eigrp)
450 {
451 	struct rde_nbr		 nbr;
452 
453 	memset(&nbr, 0, sizeof(nbr));
454 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_REDIST;
455 	eigrp->rnbr_redist = rde_nbr_new(NBR_IDSELF, &nbr);
456 	eigrp->rnbr_redist->eigrp = eigrp;
457 	nbr.flags = F_RDE_NBR_SELF | F_RDE_NBR_SUMMARY;
458 	eigrp->rnbr_summary = rde_nbr_new(NBR_IDSELF, &nbr);
459 	eigrp->rnbr_summary->eigrp = eigrp;
460 }
461 
462 void
rde_instance_del(struct eigrp * eigrp)463 rde_instance_del(struct eigrp *eigrp)
464 {
465 	struct rde_nbr		*nbr, *safe;
466 	struct rt_node		*rn;
467 
468 	/* clear topology */
469 	while((rn = RB_MIN(rt_tree, &eigrp->topology)) != NULL)
470 		rt_del(rn);
471 
472 	/* clear nbrs */
473 	RB_FOREACH_SAFE(nbr, rde_nbr_head, &rde_nbrs, safe)
474 		if (nbr->eigrp == eigrp)
475 			rde_nbr_del(nbr, 0);
476 	rde_nbr_del(eigrp->rnbr_redist, 0);
477 	rde_nbr_del(eigrp->rnbr_summary, 0);
478 
479 	free(eigrp);
480 }
481 
482 void
rde_send_change_kroute(struct rt_node * rn,struct eigrp_route * route)483 rde_send_change_kroute(struct rt_node *rn, struct eigrp_route *route)
484 {
485 	struct eigrp	*eigrp = route->nbr->eigrp;
486 	struct kroute	 kr;
487 	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;
488 
489 	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
490 	    log_addr(eigrp->af, &route->nbr->addr));
491 
492 	memset(&kr, 0, sizeof(kr));
493 	kr.af = eigrp->af;
494 	kr.prefix = rn->prefix;
495 	kr.prefixlen = rn->prefixlen;
496 	if (route->nbr->ei) {
497 		kr.nexthop = route->nexthop;
498 		kr.ifindex = route->nbr->ei->iface->ifindex;
499 	} else {
500 		switch (eigrp->af) {
501 		case AF_INET:
502 			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
503 			break;
504 		case AF_INET6:
505 			kr.nexthop.v6 = lo6;
506 			break;
507 		default:
508 			fatalx("rde_send_delete_kroute: unknown af");
509 			break;
510 		}
511 		kr.flags = F_BLACKHOLE;
512 	}
513 	if (route->type == EIGRP_ROUTE_EXTERNAL)
514 		kr.priority = rdeconf->fib_priority_external;
515 	else {
516 		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
517 			kr.priority = rdeconf->fib_priority_summary;
518 		else
519 			kr.priority = rdeconf->fib_priority_internal;
520 	}
521 
522 	rde_imsg_compose_parent(IMSG_KROUTE_CHANGE, 0, &kr, sizeof(kr));
523 
524 	route->flags |= F_EIGRP_ROUTE_INSTALLED;
525 }
526 
/*
 * Ask the parent process to remove the kernel route for this
 * destination via IMSG_KROUTE_DELETE and clear the installed flag.
 * NOTE(review): mirrors rde_send_change_kroute almost line for line -
 * the kroute must be rebuilt identically so the kernel lookup matches
 * the previously installed route.
 */
void
rde_send_delete_kroute(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp	*eigrp = route->nbr->eigrp;
	struct kroute	 kr;
	struct in6_addr	 lo6 = IN6ADDR_LOOPBACK_INIT;

	log_debug("%s: %s nbr %s", __func__, log_prefix(rn),
	    log_addr(eigrp->af, &route->nbr->addr));

	memset(&kr, 0, sizeof(kr));
	kr.af = eigrp->af;
	kr.prefix = rn->prefix;
	kr.prefixlen = rn->prefixlen;
	if (route->nbr->ei) {
		kr.nexthop = route->nexthop;
		kr.ifindex = route->nbr->ei->iface->ifindex;
	} else {
		/* self-originated route: blackhole via loopback */
		switch (eigrp->af) {
		case AF_INET:
			kr.nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
			break;
		case AF_INET6:
			kr.nexthop.v6 = lo6;
			break;
		default:
			fatalx("rde_send_delete_kroute: unknown af");
			break;
		}
		kr.flags = F_BLACKHOLE;
	}
	/* priority must match the one used at installation time */
	if (route->type == EIGRP_ROUTE_EXTERNAL)
		kr.priority = rdeconf->fib_priority_external;
	else {
		if (route->nbr->flags & F_RDE_NBR_SUMMARY)
			kr.priority = rdeconf->fib_priority_summary;
		else
			kr.priority = rdeconf->fib_priority_internal;
	}

	rde_imsg_compose_parent(IMSG_KROUTE_DELETE, 0, &kr, sizeof(kr));

	route->flags &= ~F_EIGRP_ROUTE_INSTALLED;
}
571 
572 static struct redistribute *
eigrp_redistribute(struct eigrp * eigrp,struct kroute * kr)573 eigrp_redistribute(struct eigrp *eigrp, struct kroute *kr)
574 {
575 	struct redistribute	*r;
576 	uint8_t			 is_default = 0;
577 	union eigrpd_addr	 addr;
578 
579 	/* only allow the default route via REDIST_DEFAULT */
580 	if (!eigrp_addrisset(kr->af, &kr->prefix) && kr->prefixlen == 0)
581 		is_default = 1;
582 
583 	SIMPLEQ_FOREACH(r, &eigrp->redist_list, entry) {
584 		switch (r->type & ~REDIST_NO) {
585 		case REDIST_STATIC:
586 			if (is_default)
587 				continue;
588 			if (kr->flags & F_STATIC)
589 				return (r->type & REDIST_NO ? NULL : r);
590 			break;
591 		case REDIST_RIP:
592 			if (is_default)
593 				continue;
594 			if (kr->priority == RTP_RIP)
595 				return (r->type & REDIST_NO ? NULL : r);
596 			break;
597 		case REDIST_OSPF:
598 			if (is_default)
599 				continue;
600 			if (kr->priority == RTP_OSPF)
601 				return (r->type & REDIST_NO ? NULL : r);
602 			break;
603 		case REDIST_CONNECTED:
604 			if (is_default)
605 				continue;
606 			if (kr->flags & F_CONNECTED)
607 				return (r->type & REDIST_NO ? NULL : r);
608 			break;
609 		case REDIST_ADDR:
610 			if (eigrp_addrisset(r->af, &r->addr) &&
611 			    r->prefixlen == 0) {
612 				if (is_default)
613 					return (r->type & REDIST_NO ? NULL : r);
614 				else
615 					return (0);
616 			}
617 
618 			eigrp_applymask(kr->af, &addr, &kr->prefix,
619 			    r->prefixlen);
620 			if (eigrp_addrcmp(kr->af, &addr, &r->addr) == 0 &&
621 			    kr->prefixlen >= r->prefixlen)
622 				return (r->type & REDIST_NO ? NULL : r);
623 			break;
624 		case REDIST_DEFAULT:
625 			if (is_default)
626 				return (r->type & REDIST_NO ? NULL : r);
627 			break;
628 		}
629 	}
630 
631 	return (NULL);
632 }
633 
/*
 * Inject (or, with withdraw set, retract) a kernel route as an
 * external EIGRP route into every instance whose redistribute rules
 * accept it.  Withdrawal is signalled by an infinite delay metric.
 */
static void
rt_redist_set(struct kroute *kr, int withdraw)
{
	struct eigrp		*eigrp;
	struct redistribute	*r;
	struct redist_metric	*rmetric;
	struct rinfo		 ri;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		if (eigrp->af != kr->af)
			continue;

		r = eigrp_redistribute(eigrp, kr);
		if (r == NULL)
			continue;

		/* per-rule metric wins over the instance default metric */
		if (r->metric)
			rmetric = r->metric;
		else if (eigrp->dflt_metric)
			rmetric = eigrp->dflt_metric;
		else
			continue;

		memset(&ri, 0, sizeof(ri));
		ri.af = kr->af;
		ri.type = EIGRP_ROUTE_EXTERNAL;
		ri.prefix = kr->prefix;
		ri.prefixlen = kr->prefixlen;

		/* metric */
		if (withdraw)
			ri.metric.delay = EIGRP_INFINITE_METRIC;
		else
			ri.metric.delay = eigrp_composite_delay(rmetric->delay);
		ri.metric.bandwidth =
		    eigrp_composite_bandwidth(rmetric->bandwidth);
		metric_encode_mtu(ri.metric.mtu, rmetric->mtu);
		ri.metric.hop_count = 0;
		ri.metric.reliability = rmetric->reliability;
		ri.metric.load = rmetric->load;
		ri.metric.tag = 0;
		ri.metric.flags = 0;

		/* external metric: record where the route came from */
		ri.emetric.routerid = htonl(rdeconf->rtr_id.s_addr);
		ri.emetric.as = r->emetric.as;
		ri.emetric.tag = r->emetric.tag;
		ri.emetric.metric = r->emetric.metric;
		if (kr->priority == rdeconf->fib_priority_internal)
			ri.emetric.protocol = EIGRP_EXT_PROTO_EIGRP;
		else if (kr->priority == RTP_STATIC)
			ri.emetric.protocol = EIGRP_EXT_PROTO_STATIC;
		else if (kr->priority == RTP_RIP)
			ri.emetric.protocol = EIGRP_EXT_PROTO_RIP;
		else if (kr->priority == RTP_OSPF)
			ri.emetric.protocol = EIGRP_EXT_PROTO_OSPF;
		else
			ri.emetric.protocol = EIGRP_EXT_PROTO_CONN;
		ri.emetric.flags = 0;

		/* run DUAL as if the redist pseudo-neighbor sent an update */
		rde_check_update(eigrp->rnbr_redist, &ri);
	}
}
697 
698 void
rt_summary_set(struct eigrp * eigrp,struct summary_addr * summary,struct classic_metric * metric)699 rt_summary_set(struct eigrp *eigrp, struct summary_addr *summary,
700     struct classic_metric *metric)
701 {
702 	struct rinfo		 ri;
703 
704 	memset(&ri, 0, sizeof(ri));
705 	ri.af = eigrp->af;
706 	ri.type = EIGRP_ROUTE_INTERNAL;
707 	ri.prefix = summary->prefix;
708 	ri.prefixlen = summary->prefixlen;
709 	ri.metric = *metric;
710 
711 	rde_check_update(eigrp->rnbr_summary, &ri);
712 }
713 
714 /* send all known routing information to new neighbor */
715 static void
rt_snap(struct rde_nbr * nbr)716 rt_snap(struct rde_nbr *nbr)
717 {
718 	struct eigrp		*eigrp = nbr->eigrp;
719 	struct rt_node		*rn;
720 	struct rinfo		 ri;
721 
722 	RB_FOREACH(rn, rt_tree, &eigrp->topology)
723 		if (rn->state == DUAL_STA_PASSIVE &&
724 		    !rde_summary_check(nbr->ei, &rn->prefix, rn->prefixlen)) {
725 			rinfo_fill_successor(rn, &ri);
726 			rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE,
727 			    nbr->peerid, 0, &ri, sizeof(ri));
728 		}
729 
730 	rde_imsg_compose_eigrpe(IMSG_SEND_UPDATE_END, nbr->peerid, 0,
731 	    NULL, 0);
732 }
733 
/*
 * Build a ctl_rt record describing one route of a topology entry for
 * eigrpctl.  Returns a pointer to a static buffer that is overwritten
 * by the next call - callers must copy or send it before calling again.
 */
static struct ctl_rt *
rt_to_ctl(struct rt_node *rn, struct eigrp_route *route)
{
	static struct ctl_rt	 rtctl;

	memset(&rtctl, 0, sizeof(rtctl));
	rtctl.af = route->nbr->eigrp->af;
	rtctl.as = route->nbr->eigrp->as;
	rtctl.prefix = rn->prefix;
	rtctl.prefixlen = rn->prefixlen;
	rtctl.type = route->type;
	rtctl.nexthop = route->nexthop;
	/* self pseudo-neighbors have no real interface to name */
	if (route->nbr->flags & F_RDE_NBR_REDIST)
		strlcpy(rtctl.ifname, "redistribute", sizeof(rtctl.ifname));
	else if (route->nbr->flags & F_RDE_NBR_SUMMARY)
		strlcpy(rtctl.ifname, "summary", sizeof(rtctl.ifname));
	else
		memcpy(rtctl.ifname, route->nbr->ei->iface->name,
		    sizeof(rtctl.ifname));
	rtctl.distance = route->distance;
	rtctl.rdistance = route->rdistance;
	rtctl.fdistance = rn->successor.fdistance;
	rtctl.state = rn->state;
	/* metric: convert from wire encoding to display units */
	rtctl.metric.delay = eigrp_real_delay(route->metric.delay);
	/* translate to microseconds */
	rtctl.metric.delay *= 10;
	rtctl.metric.bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);
	rtctl.metric.mtu = metric_decode_mtu(route->metric.mtu);
	rtctl.metric.hop_count = route->metric.hop_count;
	rtctl.metric.reliability = route->metric.reliability;
	rtctl.metric.load = route->metric.load;
	/* external metric */
	rtctl.emetric = route->emetric;

	/* flag successor / feasible-successor status for display */
	if (route->nbr == rn->successor.nbr)
		rtctl.flags |= F_CTL_RT_SUCCESSOR;
	else if (route->rdistance < rn->successor.fdistance)
		rtctl.flags |= F_CTL_RT_FSUCCESSOR;

	return (&rtctl);
}
776 
/*
 * Stream the topology table to eigrpctl (via eigrpe) in response to an
 * IMSG_CTL_SHOW_TOPOLOGY request, honoring the request's prefix,
 * prefixlen and flag filters.  The first route of each destination is
 * tagged F_CTL_RT_FIRST so the client can group the output.
 */
static void
rt_dump(struct ctl_show_topology_req *treq, pid_t pid)
{
	struct eigrp		*eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route;
	struct ctl_rt		*rtctl;
	int			 first = 1;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry) {
		RB_FOREACH(rn, rt_tree, &eigrp->topology) {
			/* optional exact-prefix filter */
			if (eigrp_addrisset(treq->af, &treq->prefix) &&
			    eigrp_addrcmp(treq->af, &treq->prefix,
			    &rn->prefix))
				continue;

			/* optional prefix-length filter */
			if (treq->prefixlen &&
			    (treq->prefixlen != rn->prefixlen))
				continue;

			first = 1;
			TAILQ_FOREACH(route, &rn->routes, entry) {
				/* "active" only: skip passive destinations */
				if (treq->flags & F_CTL_ACTIVE &&
				    !(rn->state & DUAL_STA_ACTIVE_ALL))
					continue;
				/* without -A, show only feasible successors */
				if (!(treq->flags & F_CTL_ALLLINKS) &&
				    route->rdistance >= rn->successor.fdistance)
					continue;

				rtctl = rt_to_ctl(rn, route);
				if (first) {
					rtctl->flags |= F_CTL_RT_FIRST;
					first = 0;
				}
				rde_imsg_compose_eigrpe(IMSG_CTL_SHOW_TOPOLOGY,
				    0, pid, rtctl, sizeof(*rtctl));
			}
		}
	}
}
817