xref: /openbsd/usr.sbin/ripd/rde.c (revision 79911490)
1 /*	$OpenBSD: rde.c,v 1.23 2021/01/19 10:18:56 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2006 Michele Marchetto <mydecay@openbeer.it>
5  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
6  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
7  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <netinet/in.h>
25 #include <arpa/inet.h>
26 #include <err.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <signal.h>
30 #include <string.h>
31 #include <pwd.h>
32 #include <unistd.h>
33 #include <event.h>
34 
35 #include "ripd.h"
36 #include "rip.h"
37 #include "ripe.h"
38 #include "log.h"
39 #include "rde.h"
40 
/* smaller of two values; classic parenthesized macro, args evaluated twice */
#define	MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))

struct ripd_conf	*rdeconf = NULL;	/* running configuration */
struct imsgev		*iev_ripe;		/* imsg channel to the rip engine */
struct imsgev		*iev_main;		/* imsg channel to the parent */

void	rde_sig_handler(int, short, void *);
__dead void rde_shutdown(void);
void	rde_dispatch_imsg(int, short, void *);
void	rde_dispatch_parent(int, short, void *);
int	rde_imsg_compose_ripe(int, u_int32_t, pid_t, void *, u_int16_t);
int	rde_check_route(struct rip_route *);
void	triggered_update(struct rt_node *);
54 
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * Async-signal-safety rules don't apply here: libevent delivers
	 * the signal to us from the event loop, outside signal context.
	 */

	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}

	fatalx("unexpected signal");
}
72 
/*
 * route decision engine: fork the RDE process and run its event loop.
 * Returns the child's pid to the caller (the parent); the child never
 * returns from this function -- it exits via rde_shutdown().
 */
pid_t
rde(struct ripd_conf *xconf, int pipe_parent2rde[2], int pipe_ripe2rde[2],
    int pipe_parent2ripe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		/* child: fall through and become the RDE */
		break;
	default:
		/* parent: hand back the child's pid */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(RIPD_USER)) == NULL)
		fatal("getpwnam");

	/* jail the process in the unprivileged user's home directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	log_procname = "rde";

	/* drop root privileges: supplementary groups, gid, then uid */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	/* the RDE only talks over already-open pipes, so stdio suffices */
	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends belonging to the other processes */
	close(pipe_ripe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ripe[0]);
	close(pipe_parent2ripe[1]);

	if ((iev_ripe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ripe->ibuf, pipe_ripe2rde[1]);
	iev_ripe->handler =  rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ripe->events = EV_READ;
	event_set(&iev_ripe->ev, iev_ripe->ibuf.fd, iev_ripe->events,
	    iev_ripe->handler, iev_ripe);
	event_add(&iev_ripe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);
	rt_init();

	/* remove unneeded config stuff (redistribute list is parent-only) */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	event_dispatch();

	/* loop only exits when a pipe dies; clean up and _exit() */
	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
163 
/*
 * Tear down the RDE: flush and close both imsg pipes, release the
 * routing table and configuration, then _exit().  Never returns.
 */
__dead void
rde_shutdown(void)
{
	/* close pipes */
	msgbuf_clear(&iev_ripe->ibuf.w);
	close(iev_ripe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	rt_clear();
	free(iev_ripe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
181 
182 int
183 rde_imsg_compose_ripe(int type, u_int32_t peerid, pid_t pid, void *data,
184     u_int16_t datalen)
185 {
186 	return (imsg_compose_event(iev_ripe, type, peerid, pid, -1,
187 		    data, datalen));
188 }
189 
190 /* ARGSUSED */
191 void
192 rde_dispatch_imsg(int fd, short event, void *bula)
193 {
194 	struct imsgev		*iev = bula;
195 	struct imsgbuf		*ibuf = &iev->ibuf;
196 	struct rip_route	 rr;
197 	struct imsg		 imsg;
198 	ssize_t			 n;
199 	int			 shut = 0, verbose;
200 
201 	if (event & EV_READ) {
202 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
203 			fatal("imsg_read error");
204 		if (n == 0)	/* connection closed */
205 			shut = 1;
206 	}
207 	if (event & EV_WRITE) {
208 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
209 			fatal("msgbuf_write");
210 		if (n == 0)	/* connection closed */
211 			shut = 1;
212 	}
213 
214 	for (;;) {
215 		if ((n = imsg_get(ibuf, &imsg)) == -1)
216 			fatal("rde_dispatch_imsg: imsg_get error");
217 		if (n == 0)
218 			break;
219 
220 		switch (imsg.hdr.type) {
221 		case IMSG_ROUTE_FEED:
222 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
223 				fatalx("invalid size of RDE request");
224 
225 			memcpy(&rr, imsg.data, sizeof(rr));
226 
227 			if (rde_check_route(&rr) == -1)
228 				log_debug("rde_dispatch_imsg: "
229 				    "packet malformed\n");
230 			break;
231 		case IMSG_FULL_REQUEST:
232 			bzero(&rr, sizeof(rr));
233 			/*
234 			 * AFI == 0 && metric == INFINITY request the
235 			 * whole routing table
236 			 */
237 			rr.metric = INFINITY;
238 			rde_imsg_compose_ripe(IMSG_REQUEST_ADD, 0,
239 			    0, &rr, sizeof(rr));
240 			rde_imsg_compose_ripe(IMSG_SEND_REQUEST, 0,
241 			    0, NULL, 0);
242 			break;
243 		case IMSG_FULL_RESPONSE:
244 			rt_snap(imsg.hdr.peerid);
245 			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
246 			    imsg.hdr.peerid, 0, NULL, 0);
247 			break;
248 		case IMSG_ROUTE_REQUEST:
249 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
250 				fatalx("invalid size of RDE request");
251 
252 			memcpy(&rr, imsg.data, sizeof(rr));
253 
254 			rt_complete(&rr);
255 			rde_imsg_compose_ripe(IMSG_RESPONSE_ADD,
256 			    imsg.hdr.peerid, 0, &rr, sizeof(rr));
257 
258 			break;
259 		case IMSG_ROUTE_REQUEST_END:
260 			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
261 			    imsg.hdr.peerid, 0, NULL, 0);
262 			break;
263 		case IMSG_CTL_SHOW_RIB:
264 			rt_dump(imsg.hdr.pid);
265 
266 			imsg_compose_event(iev_ripe, IMSG_CTL_END, 0,
267 			    imsg.hdr.pid, -1, NULL, 0);
268 
269 			break;
270 		case IMSG_CTL_LOG_VERBOSE:
271 			/* already checked by ripe */
272 			memcpy(&verbose, imsg.data, sizeof(verbose));
273 			log_verbose(verbose);
274 			break;
275 		default:
276 			log_debug("rde_dispatch_msg: unexpected imsg %d",
277 			    imsg.hdr.type);
278 			break;
279 		}
280 		imsg_free(&imsg);
281 	}
282 	if (!shut)
283 		imsg_event_add(iev);
284 	else {
285 		/* this pipe is dead, so remove the event handler */
286 		event_del(&iev->ev);
287 		event_loopexit(NULL);
288 	}
289 }
290 
291 /* ARGSUSED */
292 void
293 rde_dispatch_parent(int fd, short event, void *bula)
294 {
295 	struct imsg		 imsg;
296 	struct rt_node		*rt;
297 	struct kroute		 kr;
298 	struct imsgev		*iev = bula;
299 	struct imsgbuf		*ibuf = &iev->ibuf;
300 	ssize_t			 n;
301 	int			 shut = 0;
302 
303 	if (event & EV_READ) {
304 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
305 			fatal("imsg_read error");
306 		if (n == 0)	/* connection closed */
307 			shut = 1;
308 	}
309 	if (event & EV_WRITE) {
310 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
311 			fatal("msgbuf_write");
312 		if (n == 0)	/* connection closed */
313 			shut = 1;
314 	}
315 
316 	for (;;) {
317 		if ((n = imsg_get(ibuf, &imsg)) == -1)
318 			fatal("rde_dispatch_parent: imsg_get error");
319 		if (n == 0)
320 			break;
321 
322 		switch (imsg.hdr.type) {
323 		case IMSG_NETWORK_ADD:
324 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
325 				log_warnx("rde_dispatch: wrong imsg len");
326 				break;
327 			}
328 
329 			memcpy(&kr, imsg.data, sizeof(kr));
330 
331 			rt = rt_new_kr(&kr);
332 			rt_insert(rt);
333 			break;
334 		case IMSG_NETWORK_DEL:
335 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
336 				log_warnx("rde_dispatch: wrong imsg len");
337 				break;
338 			}
339 			memcpy(&kr, imsg.data, sizeof(kr));
340 
341 			if ((rt = rt_find(kr.prefix.s_addr,
342 			    kr.netmask.s_addr)) != NULL)
343 				rt_remove(rt);
344 			break;
345 		default:
346 			log_debug("rde_dispatch_parent: unexpected imsg %d",
347 			    imsg.hdr.type);
348 			break;
349 		}
350 		imsg_free(&imsg);
351 	}
352 	if (!shut)
353 		imsg_event_add(iev);
354 	else {
355 		/* this pipe is dead, so remove the event handler */
356 		event_del(&iev->ev);
357 		event_loopexit(NULL);
358 	}
359 }
360 
361 void
362 rde_send_change_kroute(struct rt_node *r)
363 {
364 	struct kroute	 kr;
365 
366 	bzero(&kr, sizeof(kr));
367 	kr.prefix.s_addr = r->prefix.s_addr;
368 	kr.nexthop.s_addr = r->nexthop.s_addr;
369 	kr.netmask.s_addr = r->netmask.s_addr;
370 	kr.metric = r->metric;
371 	kr.flags = r->flags;
372 	kr.ifindex = r->ifindex;
373 
374 	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
375 	    &kr, sizeof(kr));
376 }
377 
378 void
379 rde_send_delete_kroute(struct rt_node *r)
380 {
381 	struct kroute	 kr;
382 
383 	bzero(&kr, sizeof(kr));
384 	kr.prefix.s_addr = r->prefix.s_addr;
385 	kr.nexthop.s_addr = r->nexthop.s_addr;
386 	kr.netmask.s_addr = r->netmask.s_addr;
387 	kr.metric = r->metric;
388 	kr.flags = r->flags;
389 	kr.ifindex = r->ifindex;
390 
391 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
392 	    &kr, sizeof(kr));
393 }
394 
/*
 * Distance-vector route decision (RFC 2453 style): validate a received
 * route entry and merge it into the RIB.  Returns -1 if the entry is
 * malformed (bad nexthop or unknown interface), 0 otherwise.
 */
int
rde_check_route(struct rip_route *e)
{
	struct timeval	 tv, now;
	struct rt_node	*rn;
	struct iface	*iface;
	u_int8_t	 metric;

	/* reject loopback (127/8) and unspecified nexthops */
	if ((e->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET) ||
	    e->nexthop.s_addr == INADDR_ANY)
		return (-1);

	if ((iface = if_find_index(e->ifindex)) == NULL)
		return (-1);

	/* add the incoming interface's cost, capped at INFINITY (16) */
	metric = MINIMUM(INFINITY, e->metric + iface->cost);

	if ((rn = rt_find(e->address.s_addr, e->mask.s_addr)) == NULL) {
		/* unknown prefix: only accept it if it is reachable */
		if (metric >= INFINITY)
			return (0);
		rn = rt_new_rr(e, metric);
		rt_insert(rn);
		rde_send_change_kroute(rn);
		route_start_timeout(rn);
		triggered_update(rn);
	} else {
		/*
		 * XXX don't we have to track all incoming routes?
		 * what happens if the kernel route is removed later.
		 */
		if (rn->flags & F_KERNEL)
			return (0);

		if (metric < rn->metric) {
			/* strictly better path: adopt it */
			rn->metric = metric;
			rn->nexthop.s_addr = e->nexthop.s_addr;
			rn->ifindex = e->ifindex;
			rde_send_change_kroute(rn);
			triggered_update(rn);
		} else if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    metric > rn->metric) {
				/* current nexthop got worse: believe it */
				rn->metric = metric;
				rde_send_change_kroute(rn);
				triggered_update(rn);
				if (rn->metric == INFINITY)
					route_start_garbage(rn);
		} else if (e->nexthop.s_addr != rn->nexthop.s_addr &&
		    metric == rn->metric) {
			/* If the new metric is the same as the old one,
			 * examine the timeout for the existing route.  If it
			 * is at least halfway to the expiration point, switch
			 * to the new route.
			 */
			timerclear(&tv);
			gettimeofday(&now, NULL);
			evtimer_pending(&rn->timeout_timer, &tv);
			if (tv.tv_sec - now.tv_sec < ROUTE_TIMEOUT / 2) {
				rn->nexthop.s_addr = e->nexthop.s_addr;
				rn->ifindex = e->ifindex;
				rde_send_change_kroute(rn);
			}
		}

		/* fresh advertisement from the current nexthop: refresh */
		if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    rn->metric < INFINITY)
			route_reset_timers(rn);
	}

	return (0);
}
466 
467 void
468 triggered_update(struct rt_node *rn)
469 {
470 	struct rip_route	 rr;
471 
472 	rr.address.s_addr = rn->prefix.s_addr;
473 	rr.mask.s_addr = rn->netmask.s_addr;
474 	rr.nexthop.s_addr = rn->nexthop.s_addr;
475 	rr.metric = rn->metric;
476 	rr.ifindex = rn->ifindex;
477 
478 	rde_imsg_compose_ripe(IMSG_SEND_TRIGGERED_UPDATE, 0, 0, &rr,
479 	    sizeof(struct rip_route));
480 }
481