xref: /openbsd/usr.sbin/ripd/kroute.c (revision 905646f0)
1 /*	$OpenBSD: kroute.c,v 1.34 2019/12/11 21:04:59 remi Exp $ */
2 
3 /*
4  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
5  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/types.h>
21 #include <sys/socket.h>
22 #include <sys/sysctl.h>
23 #include <sys/tree.h>
24 #include <sys/uio.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <net/if.h>
28 #include <net/if_dl.h>
29 #include <net/if_types.h>
30 #include <net/route.h>
31 #include <err.h>
32 #include <errno.h>
33 #include <fcntl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 
39 #include "rip.h"
40 #include "ripd.h"
41 #include "log.h"
42 
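/*
 * Global state for the kernel interaction: rtseq is the rtm_seq counter
 * for outgoing routing messages, pid lets us skip messages we generated
 * ourselves, fib_sync says whether routes are currently installed into
 * the FIB, fib_prio is the routing priority used for ripd's own routes,
 * fd is the routing socket, ev its libevent read handler and rdomain
 * the routing table this daemon operates on.
 */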
43 struct {
44 	u_int32_t		rtseq;
45 	pid_t			pid;
46 	int			fib_sync;
47 	u_int8_t		fib_prio;
48 	int			fd;
49 	struct event		ev;
50 	u_int			rdomain;
51 } kr_state;
52 
53 struct kroute_node {
54 	RB_ENTRY(kroute_node)	 entry;
55 	struct kroute		 r;
56 };
57 
58 struct kif_node {
59 	RB_ENTRY(kif_node)	 entry;
60 	struct kif		 k;
61 };
62 
63 void	kr_redistribute(int, struct kroute *);
64 int	kroute_compare(struct kroute_node *, struct kroute_node *);
65 int	kif_compare(struct kif_node *, struct kif_node *);
66 int	kr_change_fib(struct kroute_node *, struct kroute *, int);
67 
68 struct kroute_node	*kroute_find(in_addr_t, in_addr_t, u_int8_t);
69 int			 kroute_insert(struct kroute_node *);
70 int			 kroute_remove(struct kroute_node *);
71 void			 kroute_clear(void);
72 
73 struct kif_node		*kif_find(int);
74 int			 kif_insert(struct kif_node *);
75 int			 kif_remove(struct kif_node *);
76 void			 kif_clear(void);
77 int			 kif_validate(int);
78 
79 struct kroute_node	*kroute_match(in_addr_t);
80 
81 int		protect_lo(void);
82 u_int8_t	prefixlen_classful(in_addr_t);
83 void		get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
84 void		if_change(u_short, int, struct if_data *);
85 void		if_announce(void *);
86 
87 int		send_rtmsg(int, int, struct kroute *);
88 int		dispatch_rtmsg(void);
89 int		fetchtable(void);
90 int		fetchifs(int);
91 
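/*
 * Two red-black trees hold the local view of the kernel: krt contains
 * the routes, keyed by prefix, netmask and priority (see
 * kroute_compare()), and kit the interfaces, keyed by ifindex (see
 * kif_compare()).
 */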
92 RB_HEAD(kroute_tree, kroute_node)	krt;
93 RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
94 RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)
95 
96 RB_HEAD(kif_tree, kif_node)		kit;
97 RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
98 RB_GENERATE(kif_tree, kif_node, entry, kif_compare)
99 
100 int
101 kif_init(void)
102 {
103 	RB_INIT(&kit);
104 
105 	if (fetchifs(0) == -1)
106 		return (-1);
107 
108 	return (0);
109 }
110 
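/*
 * Open the routing socket (non-blocking, close-on-exec), disable
 * loopback of our own messages, enlarge the receive buffer, load the
 * current routing table, protect 127/8 and register the libevent read
 * handler for incoming routing messages.
 */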
111 int
112 kr_init(int fs, u_int rdomain, u_int8_t fib_prio)
113 {
114 	int		opt = 0, rcvbuf, default_rcvbuf;
115 	socklen_t	optlen;
116 
117 	if ((kr_state.fd = socket(AF_ROUTE,
118 	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
119 		log_warn("kr_init: socket");
120 		return (-1);
121 	}
122 
123 	/* not interested in my own messages */
124 	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
125 	    &opt, sizeof(opt)) == -1)
126 		log_warn("kr_init: setsockopt");	/* not fatal */
127 
128 	/* grow the receive buffer so we don't miss messages */
129 	optlen = sizeof(default_rcvbuf);
130 	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
131 	    &default_rcvbuf, &optlen) == -1)
132 		log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
133 	else
134 		for (rcvbuf = MAX_RTSOCK_BUF;
135 		    rcvbuf > default_rcvbuf &&
136 		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
137 		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
138 		    rcvbuf /= 2)
139 			;	/* nothing */
140 
141 	kr_state.pid = getpid();
142 	kr_state.rtseq = 1;
143 	kr_state.fib_prio = fib_prio;
144 
145 	RB_INIT(&krt);
146 
147 	if (fetchtable() == -1)
148 		return (-1);
149 
150 	if (protect_lo() == -1)
151 		return (-1);
152 
153 	kr_state.fib_sync = fs; /* now set correct sync mode */
154 	kr_state.rdomain = rdomain;
155 
156 	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
157 	    kr_dispatch_msg, NULL);
158 	event_add(&kr_state.ev, NULL);
159 
160 	return (0);
161 }
162 
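/*
 * Install or update a RIP route: send the routing message to the kernel
 * and mirror the change in krt.  Routes with a nexthop in 127/8 are
 * silently ignored.  For RTM_ADD a fresh kroute_node is allocated and
 * flagged F_RIPD_INSERTED; for RTM_CHANGE only the nexthop of the
 * existing entry is updated.
 */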
163 int
164 kr_change_fib(struct kroute_node *kr, struct kroute *kroute, int action)
165 {
166 	/* nexthop within 127/8 -> ignore silently */
167 	if ((kroute->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
168 	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
169 		return (0);
170 
171 	if (send_rtmsg(kr_state.fd, action, kroute) == -1)
172 		return (-1);
173 
174 	if (action == RTM_ADD) {
175 		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL)
176 			fatal("kr_change_fib");
177 
178 		kr->r.prefix.s_addr = kroute->prefix.s_addr;
179 		kr->r.netmask.s_addr = kroute->netmask.s_addr;
180 		kr->r.nexthop.s_addr = kroute->nexthop.s_addr;
181 		kr->r.flags = kroute->flags |= F_RIPD_INSERTED;
182 		kr->r.priority = kr_state.fib_prio;
183 
184 		if (kroute_insert(kr) == -1) {
185 			log_debug("kr_change_fib: cannot insert %s",
186 			    inet_ntoa(kroute->nexthop));
187 		}
188 	} else
189 		kr->r.nexthop.s_addr = kroute->nexthop.s_addr;
190 
191 	return (0);
192 }
193 
194 int
195 kr_change(struct kroute *kroute)
196 {
197 	struct kroute_node	*kr;
198 	int			 action = RTM_ADD;
199 
200 	kr = kroute_find(kroute->prefix.s_addr, kroute->netmask.s_addr,
201 	    kr_state.fib_prio);
202 	if (kr != NULL)
203 		action = RTM_CHANGE;
204 
205 	return (kr_change_fib(kr, kroute, action));
206 }
207 
208 int
209 kr_delete(struct kroute *kroute)
210 {
211 	struct kroute_node	*kr;
212 
213 	kr = kroute_find(kroute->prefix.s_addr, kroute->netmask.s_addr,
214 	    kr_state.fib_prio);
215 	if (kr == NULL)
216 		return (0);
217 
218 	if (kr->r.priority != kr_state.fib_prio)
219 		log_warnx("kr_delete: %s/%d has wrong priority %d",
220 		    inet_ntoa(kr->r.prefix), mask2prefixlen(kr->r.netmask.s_addr),
221 		    kr->r.priority);
222 
223 	if (send_rtmsg(kr_state.fd, RTM_DELETE, kroute) == -1)
224 		return (-1);
225 
226 	if (kroute_remove(kr) == -1)
227 		return (-1);
228 
229 	return (0);
230 }
231 
232 void
233 kr_shutdown(void)
234 {
235 	kr_fib_decouple();
236 
237 	kroute_clear();
238 	kif_clear();
239 }
240 
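/*
 * Couple/decouple the FIB: when coupled, every route in krt installed
 * with our fib_prio is pushed to the kernel; when decoupled, those
 * routes are removed again and send_rtmsg() becomes a no-op.
 */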
241 void
242 kr_fib_couple(void)
243 {
244 	struct kroute_node	*kr;
245 
246 	if (kr_state.fib_sync == 1)	/* already coupled */
247 		return;
248 
249 	kr_state.fib_sync = 1;
250 
251 	RB_FOREACH(kr, kroute_tree, &krt)
252 		if (kr->r.priority == kr_state.fib_prio)
253 			send_rtmsg(kr_state.fd, RTM_ADD, &kr->r);
254 
255 	log_info("kernel routing table coupled");
256 }
257 
258 void
259 kr_fib_decouple(void)
260 {
261 	struct kroute_node	*kr;
262 
263 	if (kr_state.fib_sync == 0)	/* already decoupled */
264 		return;
265 
266 	RB_FOREACH(kr, kroute_tree, &krt)
267 		if (kr->r.priority == kr_state.fib_prio)
268 			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);
269 
270 	kr_state.fib_sync = 0;
271 
272 	log_info("kernel routing table decoupled");
273 }
274 
275 /* ARGSUSED */
276 void
277 kr_dispatch_msg(int fd, short event, void *bula)
278 {
279 	dispatch_rtmsg();
280 }
281 
282 void
283 kr_show_route(struct imsg *imsg)
284 {
285 	struct kroute_node	*kr;
286 	int			 flags;
287 	struct in_addr		 addr;
288 
289 	switch (imsg->hdr.type) {
290 	case IMSG_CTL_KROUTE:
291 		if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
292 			log_warnx("kr_show_route: wrong imsg len");
293 			return;
294 		}
295 		memcpy(&flags, imsg->data, sizeof(flags));
296 		RB_FOREACH(kr, kroute_tree, &krt)
297 			if (!flags || kr->r.flags & flags) {
298 				main_imsg_compose_ripe(IMSG_CTL_KROUTE,
299 				    imsg->hdr.pid, &kr->r, sizeof(kr->r));
300 			}
301 		break;
302 	case IMSG_CTL_KROUTE_ADDR:
303 		if (imsg->hdr.len != IMSG_HEADER_SIZE +
304 		    sizeof(struct in_addr)) {
305 			log_warnx("kr_show_route: wrong imsg len");
306 			return;
307 		}
308 		memcpy(&addr, imsg->data, sizeof(addr));
309 		kr = NULL;
310 		kr = kroute_match(addr.s_addr);
311 		if (kr != NULL)
312 			main_imsg_compose_ripe(IMSG_CTL_KROUTE, imsg->hdr.pid,
313 			    &kr->r, sizeof(kr->r));
314 		break;
315 	default:
316 		log_debug("kr_show_route: error handling imsg");
317 		break;
318 	}
319 
320 	main_imsg_compose_ripe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
321 }
322 
323 void
324 kr_ifinfo(char *ifname, pid_t pid)
325 {
326 	struct kif_node	*kif;
327 
328 	RB_FOREACH(kif, kif_tree, &kit)
329 		if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
330 			main_imsg_compose_ripe(IMSG_CTL_IFINFO,
331 			    pid, &kif->k, sizeof(kif->k));
332 		}
333 
334 	main_imsg_compose_ripe(IMSG_CTL_END, pid, NULL, 0);
335 }
336 
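/*
 * Decide whether a kernel route is announced to the RDE.  Routes on
 * interfaces that are down, prefixes in the loopback net, multicast or
 * experimental (class E) space, and routes with a loopback nexthop
 * (unless reject or blackhole) are never redistributed; everything else
 * is passed to rip_redistribute() for the final decision.
 */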
337 void
338 kr_redistribute(int type, struct kroute *kr)
339 {
340 	u_int32_t	a;
341 
342 
343 	if (type == IMSG_NETWORK_DEL) {
344 dont_redistribute:
345 		/* was the route redistributed? */
346 		if (kr->flags & F_REDISTRIBUTED) {
347 			/* remove redistributed flag */
348 			kr->flags &= ~F_REDISTRIBUTED;
349 			main_imsg_compose_rde(type, 0, kr,
350 			    sizeof(struct kroute));
351 		}
352 		return;
353 	}
354 
355 	/* interface is not up and running so don't announce */
356 	if (kr->flags & F_DOWN)
357 		return;
358 
359 	/*
360 	 * We consider the loopback net, multicast and experimental addresses
361 	 * as not redistributable.
362 	 */
363 	a = ntohl(kr->prefix.s_addr);
364 	if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
365 	    (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
366 		return;
367 	/*
368 	 * Consider networks with nexthop loopback as not redistributable
369 	 * unless it is a reject or blackhole route.
370 	 */
371 	if (kr->nexthop.s_addr == htonl(INADDR_LOOPBACK) &&
372 	    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
373 		return;
374 
375 	/* Should we redistribute this route? */
376 	if (!rip_redistribute(kr))
377 		goto dont_redistribute;
378 
379 	/* Does not matter if we resend the kr, the RDE will cope. */
380 	kr->flags |= F_REDISTRIBUTED;
381 	main_imsg_compose_rde(type, 0, kr, sizeof(struct kroute));
382 }
383 
384 /* rb-tree compare */
385 int
386 kroute_compare(struct kroute_node *a, struct kroute_node *b)
387 {
388 	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
389 		return (-1);
390 	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
391 		return (1);
392 	if (ntohl(a->r.netmask.s_addr) < ntohl(b->r.netmask.s_addr))
393 		return (-1);
394 	if (ntohl(a->r.netmask.s_addr) > ntohl(b->r.netmask.s_addr))
395 		return (1);
396 
397 	/* if the priority is RTP_ANY, finish on the first address hit */
398 	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
399 		return (0);
400 	if (a->r.priority < b->r.priority)
401 		return (-1);
402 	if (a->r.priority > b->r.priority)
403 		return (1);
404 
405 	return (0);
406 }
407 
408 int
409 kif_compare(struct kif_node *a, struct kif_node *b)
410 {
411 	return (b->k.ifindex - a->k.ifindex);
412 }
413 
414 /* tree management */
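/*
 * Look up a route by prefix, netmask and priority.  With RTP_ANY the
 * compare function matches any priority once prefix and netmask are
 * equal, so RB_FIND() may return an arbitrary match; walk backwards
 * with RB_PREV() to return the entry with the lowest (best) priority.
 */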
415 struct kroute_node *
416 kroute_find(in_addr_t prefix, in_addr_t netmask, u_int8_t prio)
417 {
418 	struct kroute_node	s, *kn, *tmp;
419 
420 	s.r.prefix.s_addr = prefix;
421 	s.r.netmask.s_addr = netmask;
422 	s.r.priority = prio;
423 
424 	kn = RB_FIND(kroute_tree, &krt, &s);
425 	if (kn && prio == RTP_ANY) {
426 		tmp = RB_PREV(kroute_tree, &krt, kn);
427 		while (tmp) {
428 			if (kroute_compare(&s, tmp) == 0)
429 				kn = tmp;
430 			else
431 				break;
432 			tmp = RB_PREV(kroute_tree, &krt, kn);
433 		}
434 	}
435 
436 	return (kn);
437 }
438 
439 int
440 kroute_insert(struct kroute_node *kr)
441 {
442 	if (RB_INSERT(kroute_tree, &krt, kr) != NULL) {
443 		log_warnx("kroute_insert failed for %s/%u",
444 		    inet_ntoa(kr->r.prefix),
445 		    mask2prefixlen(kr->r.netmask.s_addr));
446 		free(kr);
447 		return (-1);
448 	}
449 
450 	if (!(kr->r.flags & F_KERNEL)) {
451 		/* don't validate or redistribute rip route */
452 		kr->r.flags &= ~F_DOWN;
453 		return (0);
454 	}
455 
456 	if (kif_validate(kr->r.ifindex))
457 		kr->r.flags &= ~F_DOWN;
458 	else
459 		kr->r.flags |= F_DOWN;
460 
461 	kr_redistribute(IMSG_NETWORK_ADD, &kr->r);
462 
463 	return (0);
464 }
465 
466 int
467 kroute_remove(struct kroute_node *kr)
468 {
469 	if (RB_REMOVE(kroute_tree, &krt, kr) == NULL) {
470 		log_warnx("kroute_remove failed for %s/%u",
471 		    inet_ntoa(kr->r.prefix),
472 		    mask2prefixlen(kr->r.netmask.s_addr));
473 		return (-1);
474 	}
475 
476 	kr_redistribute(IMSG_NETWORK_DEL, &kr->r);
477 	rtlabel_unref(kr->r.rtlabel);
478 
479 	free(kr);
480 	return (0);
481 }
482 
483 void
484 kroute_clear(void)
485 {
486 	struct kroute_node	*kr;
487 
488 	while ((kr = RB_MIN(kroute_tree, &krt)) != NULL)
489 		kroute_remove(kr);
490 }
491 
492 struct kif_node *
493 kif_find(int ifindex)
494 {
495 	struct kif_node	s;
496 
497 	bzero(&s, sizeof(s));
498 	s.k.ifindex = ifindex;
499 
500 	return (RB_FIND(kif_tree, &kit, &s));
501 }
502 
503 struct kif *
504 kif_findname(char *ifname)
505 {
506 	struct kif_node	*kif;
507 
508 	RB_FOREACH(kif, kif_tree, &kit)
509 		if (!strcmp(ifname, kif->k.ifname))
510 			return (&kif->k);
511 
512 	return (NULL);
513 }
514 
515 int
516 kif_insert(struct kif_node *kif)
517 {
518 	if (RB_INSERT(kif_tree, &kit, kif) != NULL) {
519 		log_warnx("RB_INSERT(kif_tree, &kit, kif)");
520 		free(kif);
521 		return (-1);
522 	}
523 
524 	return (0);
525 }
526 
527 int
528 kif_remove(struct kif_node *kif)
529 {
530 	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
531 		log_warnx("RB_REMOVE(kif_tree, &kit, kif)");
532 		return (-1);
533 	}
534 
535 	free(kif);
536 	return (0);
537 }
538 
539 void
540 kif_clear(void)
541 {
542 	struct kif_node	*kif;
543 
544 	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
545 		kif_remove(kif);
546 }
547 
548 int
549 kif_validate(int ifindex)
550 {
551 	struct kif_node		*kif;
552 
553 	if ((kif = kif_find(ifindex)) == NULL) {
554 		log_warnx("interface with index %u not found", ifindex);
555 		return (1);
556 	}
557 
558 	return (kif->k.nh_reachable);
559 }
560 
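/*
 * Longest-prefix match: try all netmasks from /32 down to /1 and
 * finally fall back to the default route.  Used by kr_show_route() to
 * answer IMSG_CTL_KROUTE_ADDR requests.  A hypothetical lookup for
 * 192.0.2.1 would be:
 *
 *	struct kroute_node *kn;
 *
 *	if ((kn = kroute_match(inet_addr("192.0.2.1"))) != NULL)
 *		log_debug("via %s", inet_ntoa(kn->r.nexthop));
 */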
561 struct kroute_node *
562 kroute_match(in_addr_t key)
563 {
564 	u_int8_t		 i;
565 	struct kroute_node	*kr;
566 
567 	/* we will never match the default route */
568 	for (i = 32; i > 0; i--)
569 		if ((kr = kroute_find(key & prefixlen2mask(i),
570 		    prefixlen2mask(i), RTP_ANY)) != NULL)
571 			return (kr);
572 
573 	/* if we don't have a match yet, try to find a default route */
574 	if ((kr = kroute_find(0, 0, RTP_ANY)) != NULL)
575 		return (kr);
576 
577 	return (NULL);
578 }
579 
580 /* misc */
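/*
 * Insert a 127/8 entry flagged F_KERNEL|F_CONNECTED directly into the
 * tree so the loopback net can never be clobbered by a learned route.
 * If the kernel route is already present, the duplicate is simply freed.
 */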
581 int
582 protect_lo(void)
583 {
584 	struct kroute_node	*kr;
585 
586 	/* special protection for 127/8 */
587 	if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
588 		log_warn("protect_lo");
589 		return (-1);
590 	}
591 	kr->r.prefix.s_addr = htonl(INADDR_LOOPBACK);
592 	kr->r.netmask.s_addr = htonl(IN_CLASSA_NET);
593 	kr->r.flags = F_KERNEL|F_CONNECTED;
594 
595 	if (RB_INSERT(kroute_tree, &krt, kr) != NULL)
596 		free(kr);	/* kernel route already there, no problem */
597 
598 	return (0);
599 }
600 
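/*
 * Classful fallback used when a routing message carries no netmask:
 * class A nets get /8, class B /16 and class C /24.
 */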
601 u_int8_t
602 prefixlen_classful(in_addr_t ina)
603 {
604 	/* it hurt to write this. */
605 
606 	if (ina >= 0xf0000000U)		/* class E */
607 		return (32);
608 	else if (ina >= 0xe0000000U)	/* class D */
609 		return (4);
610 	else if (ina >= 0xc0000000U)	/* class C */
611 		return (24);
612 	else if (ina >= 0x80000000U)	/* class B */
613 		return (16);
614 	else				/* class A */
615 		return (8);
616 }
617 
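/*
 * Convert between a netmask in network byte order and a prefix length.
 * For 255.255.255.0 (0xffffff00 in host order) ffs() finds bit 9, so
 * 33 - 9 = 24; prefixlen2mask(24) yields the same mask back in network
 * byte order.
 */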
618 u_int8_t
619 mask2prefixlen(in_addr_t ina)
620 {
621 	if (ina == 0)
622 		return (0);
623 	else
624 		return (33 - ffs(ntohl(ina)));
625 }
626 
627 in_addr_t
628 prefixlen2mask(u_int8_t prefixlen)
629 {
630 	if (prefixlen == 0)
631 		return (0);
632 
633 	return (htonl(0xffffffff << (32 - prefixlen)));
634 }
635 
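/*
 * The sockaddrs in a routing message follow the header back to back,
 * each padded to a multiple of sizeof(long).  get_rtaddrs() walks the
 * RTA_* bitmask and fills rti_info[] with one pointer per RTAX_* slot,
 * or NULL if that address is not present.
 */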
636 #define ROUNDUP(a) \
637 	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
638 
639 void
640 get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
641 {
642 	int	i;
643 
644 	for (i = 0; i < RTAX_MAX; i++) {
645 		if (addrs & (1 << i)) {
646 			rti_info[i] = sa;
647 			sa = (struct sockaddr *)((char *)(sa) +
648 			    ROUNDUP(sa->sa_len));
649 		} else
650 			rti_info[i] = NULL;
651 	}
652 }
653 
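/*
 * Update the cached interface state on RTM_IFINFO.  If nexthop
 * reachability (IFF_UP plus link state up) changed, tell the ripe
 * process and toggle F_DOWN on every route over that interface,
 * re-announcing or withdrawing it towards the RDE.
 */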
654 void
655 if_change(u_short ifindex, int flags, struct if_data *ifd)
656 {
657 	struct kif_node		*kif;
658 	struct kroute_node	*kr;
659 	int			 type;
660 	u_int8_t		 reachable;
661 
662 	if ((kif = kif_find(ifindex)) == NULL) {
663 		log_warnx("interface with index %u not found", ifindex);
664 		return;
665 	}
666 
667 	kif->k.flags = flags;
668 	kif->k.link_state = ifd->ifi_link_state;
669 	kif->k.if_type = ifd->ifi_type;
670 	kif->k.baudrate = ifd->ifi_baudrate;
671 
672 	if ((reachable = (flags & IFF_UP) &&
673 	    LINK_STATE_IS_UP(ifd->ifi_link_state)) == kif->k.nh_reachable)
674 		return;		/* nothing changed wrt nexthop validity */
675 
676 	kif->k.nh_reachable = reachable;
677 	type = reachable ? IMSG_NETWORK_ADD : IMSG_NETWORK_DEL;
678 
679 	/* notify ripe about interface link state */
680 	main_imsg_compose_ripe(IMSG_IFINFO, 0, &kif->k, sizeof(kif->k));
681 
682 	/* update redistribute list */
683 	RB_FOREACH(kr, kroute_tree, &krt)
684 		if (kr->r.ifindex == ifindex) {
685 			if (reachable)
686 				kr->r.flags &= ~F_DOWN;
687 			else
688 				kr->r.flags |= F_DOWN;
689 
690 			kr_redistribute(type, &kr->r);
691 		}
692 }
693 
694 void
695 if_announce(void *msg)
696 {
697 	struct if_announcemsghdr	*ifan;
698 	struct kif_node			*kif;
699 
700 	ifan = msg;
701 
702 	switch (ifan->ifan_what) {
703 	case IFAN_ARRIVAL:
704 		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
705 			log_warn("if_announce");
706 			return;
707 		}
708 
709 		kif->k.ifindex = ifan->ifan_index;
710 		strlcpy(kif->k.ifname, ifan->ifan_name, sizeof(kif->k.ifname));
711 		kif_insert(kif);
712 		break;
713 	case IFAN_DEPARTURE:
714 		kif = kif_find(ifan->ifan_index);
715 		kif_remove(kif);
716 		break;
717 	}
718 }
719 
720 /* rtsock */
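/*
 * Build a routing message by chaining iovecs and send it in a single
 * writev() call:
 *
 *	struct rt_msghdr	hdr		RTM_ADD/CHANGE/DELETE
 *	struct sockaddr_in	prefix		RTA_DST
 *	struct sockaddr_in	nexthop		RTA_GATEWAY (if set)
 *	struct sockaddr_in	mask		RTA_NETMASK
 *
 * On ESRCH a failed RTM_CHANGE is retried as RTM_ADD, and an RTM_DELETE
 * for a route that already vanished is ignored.
 */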
721 int
722 send_rtmsg(int fd, int action, struct kroute *kroute)
723 {
724 	struct iovec		iov[4];
725 	struct rt_msghdr	hdr;
726 	struct sockaddr_in	prefix;
727 	struct sockaddr_in	nexthop;
728 	struct sockaddr_in	mask;
729 	int			iovcnt = 0;
730 
731 	if (kr_state.fib_sync == 0)
732 		return (0);
733 
734 	/* initialize header */
735 	bzero(&hdr, sizeof(hdr));
736 	hdr.rtm_version = RTM_VERSION;
737 	hdr.rtm_type = action;
738 	hdr.rtm_priority = kr_state.fib_prio;
739 	hdr.rtm_tableid = kr_state.rdomain;
740 	if (action == RTM_CHANGE)
741 		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
742 	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
743 	hdr.rtm_msglen = sizeof(hdr);
744 	/* adjust iovec */
745 	iov[iovcnt].iov_base = &hdr;
746 	iov[iovcnt++].iov_len = sizeof(hdr);
747 
748 	bzero(&prefix, sizeof(prefix));
749 	prefix.sin_len = sizeof(prefix);
750 	prefix.sin_family = AF_INET;
751 	prefix.sin_addr.s_addr = kroute->prefix.s_addr;
752 	/* adjust header */
753 	hdr.rtm_addrs |= RTA_DST;
754 	hdr.rtm_msglen += sizeof(prefix);
755 	/* adjust iovec */
756 	iov[iovcnt].iov_base = &prefix;
757 	iov[iovcnt++].iov_len = sizeof(prefix);
758 
759 	if (kroute->nexthop.s_addr != 0) {
760 		bzero(&nexthop, sizeof(nexthop));
761 		nexthop.sin_len = sizeof(nexthop);
762 		nexthop.sin_family = AF_INET;
763 		nexthop.sin_addr.s_addr = kroute->nexthop.s_addr;
764 		/* adjust header */
765 		hdr.rtm_flags |= RTF_GATEWAY;
766 		hdr.rtm_addrs |= RTA_GATEWAY;
767 		hdr.rtm_msglen += sizeof(nexthop);
768 		/* adjust iovec */
769 		iov[iovcnt].iov_base = &nexthop;
770 		iov[iovcnt++].iov_len = sizeof(nexthop);
771 	}
772 
773 	bzero(&mask, sizeof(mask));
774 	mask.sin_len = sizeof(mask);
775 	mask.sin_family = AF_INET;
776 	mask.sin_addr.s_addr = kroute->netmask.s_addr;
777 	/* adjust header */
778 	hdr.rtm_addrs |= RTA_NETMASK;
779 	hdr.rtm_msglen += sizeof(mask);
780 	/* adjust iovec */
781 	iov[iovcnt].iov_base = &mask;
782 	iov[iovcnt++].iov_len = sizeof(mask);
783 
784 
785 retry:
786 	if (writev(fd, iov, iovcnt) == -1) {
787 		if (errno == ESRCH) {
788 			if (hdr.rtm_type == RTM_CHANGE) {
789 				hdr.rtm_type = RTM_ADD;
790 				goto retry;
791 			} else if (hdr.rtm_type == RTM_DELETE) {
792 				log_info("route %s/%u vanished before delete",
793 				    inet_ntoa(kroute->prefix),
794 				    mask2prefixlen(kroute->netmask.s_addr));
795 				return (0);
796 			}
797 		}
798 		log_warn("send_rtmsg: action %u, prefix %s/%u",
799 		    hdr.rtm_type, inet_ntoa(kroute->prefix),
800 		    mask2prefixlen(kroute->netmask.s_addr));
801 		return (0);
802 	}
803 
804 	return (0);
805 }
806 
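/*
 * Bootstrap krt with a NET_RT_DUMP sysctl snapshot of the routing table
 * in our rdomain.  Routes already carrying our fib_prio (presumably
 * left behind by an earlier ripd run) are deleted from the kernel
 * instead of being inserted into the tree.
 */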
807 int
808 fetchtable(void)
809 {
810 	size_t			 len;
811 	int			 mib[7];
812 	char			*buf, *next, *lim;
813 	struct rt_msghdr	*rtm;
814 	struct sockaddr		*sa, *rti_info[RTAX_MAX];
815 	struct sockaddr_in	*sa_in;
816 	struct sockaddr_rtlabel	*label;
817 	struct kroute_node	*kr;
818 	struct iface		*iface = NULL;
819 
820 	mib[0] = CTL_NET;
821 	mib[1] = PF_ROUTE;
822 	mib[2] = 0;
823 	mib[3] = AF_INET;
824 	mib[4] = NET_RT_DUMP;
825 	mib[5] = 0;
826 	mib[6] = kr_state.rdomain;	/* rtableid */
827 
828 	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
829 		log_warn("sysctl");
830 		return (-1);
831 	}
832 	if ((buf = malloc(len)) == NULL) {
833 		log_warn("fetchtable");
834 		return (-1);
835 	}
836 	if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
837 		log_warn("sysctl");
838 		free(buf);
839 		return (-1);
840 	}
841 
842 	lim = buf + len;
843 	for (next = buf; next < lim; next += rtm->rtm_msglen) {
844 		rtm = (struct rt_msghdr *)next;
845 		if (rtm->rtm_version != RTM_VERSION)
846 			continue;
847 		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
848 		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
849 
850 		if ((sa = rti_info[RTAX_DST]) == NULL)
851 			continue;
852 
853 		/* Skip ARP/ND cache and broadcast routes. */
854 		if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
855 			continue;
856 
857 #ifdef RTF_MPATH
858 		if (rtm->rtm_flags & RTF_MPATH)		/* multipath */
859 			continue;
860 #endif
861 
862 		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
863 			log_warn("fetchtable");
864 			free(buf);
865 			return (-1);
866 		}
867 
868 		kr->r.flags = F_KERNEL;
869 		kr->r.priority = rtm->rtm_priority;
870 
871 		switch (sa->sa_family) {
872 		case AF_INET:
873 			kr->r.prefix.s_addr =
874 			    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
875 			sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
876 			if (rtm->rtm_flags & RTF_STATIC)
877 				kr->r.flags |= F_STATIC;
878 			if (rtm->rtm_flags & RTF_BLACKHOLE)
879 				kr->r.flags |= F_BLACKHOLE;
880 			if (rtm->rtm_flags & RTF_REJECT)
881 				kr->r.flags |= F_REJECT;
882 			if (rtm->rtm_flags & RTF_DYNAMIC)
883 				kr->r.flags |= F_DYNAMIC;
884 			if (sa_in != NULL) {
885 				if (sa_in->sin_len == 0)
886 					break;
887 				kr->r.netmask.s_addr =
888 				    sa_in->sin_addr.s_addr;
889 			} else if (rtm->rtm_flags & RTF_HOST)
890 				kr->r.netmask.s_addr = prefixlen2mask(32);
891 			else
892 				kr->r.netmask.s_addr =
893 				    prefixlen2mask(prefixlen_classful
894 					(kr->r.prefix.s_addr));
895 			break;
896 		default:
897 			free(kr);
898 			continue;
899 		}
900 
901 		kr->r.ifindex = rtm->rtm_index;
902 
903 		iface = if_find_index(rtm->rtm_index);
904 		if (iface != NULL)
905 			kr->r.metric = iface->cost;
906 		else
907 			kr->r.metric = DEFAULT_COST;
908 
909 		if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
910 			switch (sa->sa_family) {
911 			case AF_INET:
912 				if (rtm->rtm_flags & RTF_CONNECTED) {
913 					kr->r.flags |= F_CONNECTED;
914 					break;
915 				}
916 
917 				kr->r.nexthop.s_addr =
918 				    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
919 				break;
920 			case AF_LINK:
921 				/*
922 				 * Traditional BSD connected routes have
923 				 * a gateway of type AF_LINK.
924 				 */
925 				kr->r.flags |= F_CONNECTED;
926 				break;
927 			}
928 
929 		if (rtm->rtm_priority == kr_state.fib_prio) {
930 			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);
931 			free(kr);
932 		} else {
933 			if ((label = (struct sockaddr_rtlabel *)
934 			    rti_info[RTAX_LABEL]) != NULL)
935 				kr->r.rtlabel =
936 				    rtlabel_name2id(label->sr_label);
937 			kroute_insert(kr);
938 		}
939 
940 	}
941 	free(buf);
942 	return (0);
943 }
944 
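/*
 * Fill kit from a NET_RT_IFLIST sysctl dump: flags, link state, type,
 * baudrate, MTU, name and nexthop reachability per interface.  An
 * ifindex of 0 fetches all interfaces, as done by kif_init().
 */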
945 int
946 fetchifs(int ifindex)
947 {
948 	size_t			 len;
949 	int			 mib[6];
950 	char			*buf, *next, *lim;
951 	struct if_msghdr	 ifm;
952 	struct kif_node		*kif;
953 	struct sockaddr		*sa, *rti_info[RTAX_MAX];
954 	struct sockaddr_dl	*sdl;
955 
956 	mib[0] = CTL_NET;
957 	mib[1] = PF_ROUTE;
958 	mib[2] = 0;
959 	mib[3] = AF_INET;
960 	mib[4] = NET_RT_IFLIST;
961 	mib[5] = ifindex;
962 
963 	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
964 		log_warn("sysctl");
965 		return (-1);
966 	}
967 	if ((buf = malloc(len)) == NULL) {
968 		log_warn("fetchif");
969 		return (-1);
970 	}
971 	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
972 		log_warn("sysctl");
973 		free(buf);
974 		return (-1);
975 	}
976 
977 	lim = buf + len;
978 	for (next = buf; next < lim; next += ifm.ifm_msglen) {
979 		memcpy(&ifm, next, sizeof(ifm));
980 		if (ifm.ifm_version != RTM_VERSION)
981 			continue;
982 		if (ifm.ifm_type != RTM_IFINFO)
983 			continue;
984 
985 		sa = (struct sockaddr *)(next + sizeof(ifm));
986 		get_rtaddrs(ifm.ifm_addrs, sa, rti_info);
987 
988 		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
989 			log_warn("fetchifs");
990 			free(buf);
991 			return (-1);
992 		}
993 
994 		kif->k.ifindex = ifm.ifm_index;
995 		kif->k.flags = ifm.ifm_flags;
996 		kif->k.link_state = ifm.ifm_data.ifi_link_state;
997 		kif->k.if_type = ifm.ifm_data.ifi_type;
998 		kif->k.baudrate = ifm.ifm_data.ifi_baudrate;
999 		kif->k.mtu = ifm.ifm_data.ifi_mtu;
1000 		kif->k.nh_reachable = (kif->k.flags & IFF_UP) &&
1001 		    LINK_STATE_IS_UP(ifm.ifm_data.ifi_link_state);
1002 		if ((sa = rti_info[RTAX_IFP]) != NULL)
1003 			if (sa->sa_family == AF_LINK) {
1004 				sdl = (struct sockaddr_dl *)sa;
1005 				if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
1006 					memcpy(kif->k.ifname, sdl->sdl_data,
1007 					    sizeof(kif->k.ifname) - 1);
1008 				else if (sdl->sdl_nlen > 0)
1009 					memcpy(kif->k.ifname, sdl->sdl_data,
1010 					    sdl->sdl_nlen);
1011 				/* string already terminated via calloc() */
1012 			}
1013 
1014 		kif_insert(kif);
1015 	}
1016 	free(buf);
1017 	return (0);
1018 }
1019 
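/*
 * Drain the routing socket and replay the messages into the local view:
 * RTM_ADD/RTM_CHANGE/RTM_DELETE update krt (skipping messages from
 * other rdomains, our own messages and failed requests), RTM_IFINFO and
 * RTM_IFANNOUNCE update the interface tree.
 */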
1020 int
1021 dispatch_rtmsg(void)
1022 {
1023 	char			 buf[RT_BUF_SIZE];
1024 	ssize_t			 n;
1025 	char			*next, *lim;
1026 	struct rt_msghdr	*rtm;
1027 	struct if_msghdr	 ifm;
1028 	struct sockaddr		*sa, *rti_info[RTAX_MAX];
1029 	struct sockaddr_in	*sa_in;
1030 	struct sockaddr_rtlabel	*label;
1031 	struct kroute_node	*kr;
1032 	struct in_addr		 prefix, nexthop, netmask;
1033 	struct iface		*iface = NULL;
1034 	int			 flags;
1035 	u_short			 ifindex = 0;
1036 	u_int8_t		 metric, prio;
1037 
1038 	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
1039 		if (errno == EAGAIN || errno == EINTR)
1040 			return (0);
1041 		log_warn("dispatch_rtmsg: read error");
1042 		return (-1);
1043 	}
1044 
1045 	if (n == 0) {
1046 		log_warnx("routing socket closed");
1047 		return (-1);
1048 	}
1049 
1050 	lim = buf + n;
1051 	for (next = buf; next < lim; next += rtm->rtm_msglen) {
1052 		rtm = (struct rt_msghdr *)next;
1053 		if (lim < next + sizeof(u_short) ||
1054 		    lim < next + rtm->rtm_msglen)
1055 			fatalx("dispatch_rtmsg: partial rtm in buffer");
1056 		if (rtm->rtm_version != RTM_VERSION)
1057 			continue;
1058 
1059 		prefix.s_addr = 0;
1060 		netmask.s_addr = 0;
1061 		flags = F_KERNEL;
1062 		nexthop.s_addr = 0;
1063 		prio = 0;
1064 
1065 		if (rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE ||
1066 		    rtm->rtm_type == RTM_DELETE) {
1067 			sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
1068 			get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
1069 
1070 			if (rtm->rtm_tableid != kr_state.rdomain)
1071 				continue;
1072 
1073 			if (rtm->rtm_pid == kr_state.pid)	/* caused by us */
1074 				continue;
1075 
1076 			if (rtm->rtm_errno)			/* failed attempts... */
1077 				continue;
1078 
1079 			/* Skip ARP/ND cache and broadcast routes. */
1080 			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
1081 				continue;
1082 
1083 			prio = rtm->rtm_priority;
1084 
1085 			switch (sa->sa_family) {
1086 			case AF_INET:
1087 				prefix.s_addr =
1088 				    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
1089 				sa_in = (struct sockaddr_in *)
1090 				    rti_info[RTAX_NETMASK];
1091 				if (sa_in != NULL) {
1092 					if (sa_in->sin_len != 0)
1093 						netmask.s_addr =
1094 						    sa_in->sin_addr.s_addr;
1095 				} else if (rtm->rtm_flags & RTF_HOST)
1096 					netmask.s_addr = prefixlen2mask(32);
1097 				else
1098 					netmask.s_addr =
1099 					    prefixlen2mask(prefixlen_classful(
1100 						prefix.s_addr));
1101 				if (rtm->rtm_flags & RTF_STATIC)
1102 					flags |= F_STATIC;
1103 				if (rtm->rtm_flags & RTF_BLACKHOLE)
1104 					flags |= F_BLACKHOLE;
1105 				if (rtm->rtm_flags & RTF_REJECT)
1106 					flags |= F_REJECT;
1107 				if (rtm->rtm_flags & RTF_DYNAMIC)
1108 					flags |= F_DYNAMIC;
1109 				break;
1110 			default:
1111 				continue;
1112 			}
1113 
1114 			ifindex = rtm->rtm_index;
1115 			if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
1116 				switch (sa->sa_family) {
1117 				case AF_INET:
1118 					nexthop.s_addr = ((struct
1119 					    sockaddr_in *)sa)->sin_addr.s_addr;
1120 					break;
1121 				case AF_LINK:
1122 					flags |= F_CONNECTED;
1123 					break;
1124 				}
1125 			}
1126 		}
1127 
1128 		switch (rtm->rtm_type) {
1129 		case RTM_ADD:
1130 		case RTM_CHANGE:
1131 			if (nexthop.s_addr == 0 && !(flags & F_CONNECTED)) {
1132 				log_warnx("dispatch_rtmsg: no nexthop for %s/%u",
1133 				    inet_ntoa(prefix),
1134 				    mask2prefixlen(netmask.s_addr));
1135 				continue;
1136 			}
1137 
1138 			if ((kr = kroute_find(prefix.s_addr, netmask.s_addr,
1139 			    prio)) != NULL) {
1140 				if (kr->r.flags & F_REDISTRIBUTED)
1141 					flags |= F_REDISTRIBUTED;
1142 				kr->r.nexthop.s_addr = nexthop.s_addr;
1143 				kr->r.flags = flags;
1144 				kr->r.ifindex = ifindex;
1145 				kr->r.priority = prio;
1146 
1147 				rtlabel_unref(kr->r.rtlabel);
1148 				kr->r.rtlabel = 0;
1149 				if ((label = (struct sockaddr_rtlabel *)
1150 				    rti_info[RTAX_LABEL]) != NULL)
1151 					kr->r.rtlabel =
1152 					    rtlabel_name2id(label->sr_label);
1153 
1154 				if (kif_validate(kr->r.ifindex))
1155 					kr->r.flags &= ~F_DOWN;
1156 				else
1157 					kr->r.flags |= F_DOWN;
1158 
1159 				/* just re-add, the RDE will take care of it */
1160 				kr_redistribute(IMSG_NETWORK_ADD, &kr->r);
1161 			} else {
1162 				if ((kr = calloc(1,
1163 				    sizeof(struct kroute_node))) == NULL) {
1164 					log_warn("dispatch_rtmsg");
1165 					return (-1);
1166 				}
1167 
1168 				iface = if_find_index(rtm->rtm_index);
1169 				if (iface != NULL)
1170 					metric = iface->cost;
1171 				else
1172 					metric = DEFAULT_COST;
1173 
1174 				kr->r.prefix.s_addr = prefix.s_addr;
1175 				kr->r.netmask.s_addr = netmask.s_addr;
1176 				kr->r.nexthop.s_addr = nexthop.s_addr;
1177 				kr->r.metric = metric;
1178 				kr->r.flags = flags;
1179 				kr->r.ifindex = ifindex;
1180 
1181 				if ((label = (struct sockaddr_rtlabel *)
1182 				    rti_info[RTAX_LABEL]) != NULL)
1183 					kr->r.rtlabel =
1184 					    rtlabel_name2id(label->sr_label);
1185 
1186 				kroute_insert(kr);
1187 			}
1188 			break;
1189 		case RTM_DELETE:
1190 			if ((kr = kroute_find(prefix.s_addr, netmask.s_addr,
1191 			    prio)) == NULL)
1192 				continue;
1193 			if (!(kr->r.flags & F_KERNEL))
1194 				continue;
1195 			if (kroute_remove(kr) == -1)
1196 				return (-1);
1197 			break;
1198 		case RTM_IFINFO:
1199 			memcpy(&ifm, next, sizeof(ifm));
1200 			if_change(ifm.ifm_index, ifm.ifm_flags,
1201 			    &ifm.ifm_data);
1202 			break;
1203 		case RTM_IFANNOUNCE:
1204 			if_announce(next);
1205 			break;
1206 		default:
1207 			/* ignore for now */
1208 			break;
1209 		}
1210 	}
1211 	return (0);
1212 }
1213