/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#ifdef INET
#include <netinet/in_fib.h>
#endif
#ifdef INET6
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#endif
#include <net/vnet.h>

#define	DEBUG_MOD_NAME	rt_helpers
#define	DEBUG_MAX_LEVEL	LOG_DEBUG2
#include <net/route/route_debug.h>
_DECLARE_DEBUG(LOG_INFO);

/*
 * RIB helper functions.
 */

void
rib_walk_ext_locked(struct rib_head *rnh, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	if (hook_f != NULL)
		hook_f(rnh, RIB_WALK_HOOK_PRE, arg);
	rnh->rnh_walktree(&rnh->head, (walktree_f_t *)wa_f, arg);
	if (hook_f != NULL)
		hook_f(rnh, RIB_WALK_HOOK_POST, arg);
}

/*
 * Calls @wa_f with @arg for each entry in the table specified by
 * @family and @fibnum.
 *
 * The @hook_f callback is called before and after the tree traversal
 *  while holding the table lock.
 *
 * Table is traversed under read lock unless @wlock is set.
 */
void
rib_walk_ext_internal(struct rib_head *rnh, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	RIB_RLOCK_TRACKER;

	if (wlock)
		RIB_WLOCK(rnh);
	else
		RIB_RLOCK(rnh);
	rib_walk_ext_locked(rnh, wa_f, hook_f, arg);
	if (wlock)
		RIB_WUNLOCK(rnh);
	else
		RIB_RUNLOCK(rnh);
}

void
rib_walk_ext(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	struct rib_head *rnh;

	if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
		rib_walk_ext_internal(rnh, wlock, wa_f, hook_f, arg);
}

/*
 * Calls @wa_f with @arg for each entry in the table specified by
 * @family and @fibnum.
 *
 * Table is traversed under read lock unless @wlock is set.
 */
void
rib_walk(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
    void *arg)
{

	rib_walk_ext(fibnum, family, wlock, wa_f, NULL, arg);
}
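
/*
 * Example (illustrative sketch only, not part of the original file):
 * a minimal rib_walk() consumer counting the routes of one table.
 * The callback and helper names below are hypothetical.
 */
static int
example_count_rt(struct rtentry *rt __unused, void *arg)
{
	uint32_t *cnt = arg;

	(*cnt)++;
	return (0);
}

static __unused void
example_count_fib_routes(uint32_t fibnum, int family, uint32_t *cnt)
{

	*cnt = 0;
	/* Read lock is sufficient for counting */
	rib_walk(fibnum, family, false, example_count_rt, cnt);
}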

/*
 * Calls @wa_f with @arg for each entry in the table matching @prefix/@mask.
 *
 * The following flags are supported:
 *  RIB_FLAG_WLOCK: acquire exclusive lock
 *  RIB_FLAG_LOCKED: assumes the table is already locked and skips locking
 *
 * By default, the table is traversed under read lock.
 */
void
rib_walk_from(uint32_t fibnum, int family, uint32_t flags, struct sockaddr *prefix,
    struct sockaddr *mask, rib_walktree_f_t *wa_f, void *arg)
{
	RIB_RLOCK_TRACKER;
	struct rib_head *rnh = rt_tables_get_rnh(fibnum, family);

	if (rnh == NULL)
		return;

	if (flags & RIB_FLAG_WLOCK)
		RIB_WLOCK(rnh);
	else if (!(flags & RIB_FLAG_LOCKED))
		RIB_RLOCK(rnh);

	rnh->rnh_walktree_from(&rnh->head, prefix, mask, (walktree_f_t *)wa_f, arg);

	if (flags & RIB_FLAG_WLOCK)
		RIB_WUNLOCK(rnh);
	else if (!(flags & RIB_FLAG_LOCKED))
		RIB_RUNLOCK(rnh);
}
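
#ifdef INET
/*
 * Example (illustrative sketch only): walking the routes covered by
 * 192.0.2.0/24 in a single fib with rib_walk_from(), reusing the
 * hypothetical example_count_rt() callback from the sketch above.
 */
static __unused void
example_walk_inet_subtree(uint32_t fibnum, uint32_t *cnt)
{
	struct sockaddr_in prefix = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr.s_addr = htonl(0xC0000200),	/* 192.0.2.0 */
	};
	struct sockaddr_in mask = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr.s_addr = htonl(0xFFFFFF00),	/* /24 */
	};

	*cnt = 0;
	rib_walk_from(fibnum, AF_INET, 0, (struct sockaddr *)&prefix,
	    (struct sockaddr *)&mask, example_count_rt, cnt);
}
#endif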

/*
 * Iterates over all existing fibs in the system, calling the @hook_f
 *  callback before/after traversing each fib.
 * Calls the @wa_f callback for each element in the current fib.
 * If @family is not AF_UNSPEC, restricts the traversal to the tables
 *  of that address family.
 */
void
rib_foreach_table_walk(int family, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		/* Do we want some specific family? */
		if (family != AF_UNSPEC) {
			rib_walk_ext(fibnum, family, wlock, wa_f, hook_f, arg);
			continue;
		}

		for (int i = 1; i <= AF_MAX; i++)
			rib_walk_ext(fibnum, i, wlock, wa_f, hook_f, arg);
	}
}
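
/*
 * Example (illustrative sketch only): using pre/post hooks with
 * rib_foreach_table_walk() to count both the routes and the tables
 * traversed. All names below are hypothetical; the hook signature is
 * assumed to match rib_walk_hook_f_t from route_ctl.h.
 */
struct example_walk_ctx {
	uint32_t routes;
	uint32_t tables_done;
};

static int
example_walk_one(struct rtentry *rt __unused, void *arg)
{
	struct example_walk_ctx *ctx = arg;

	ctx->routes++;
	return (0);
}

static void
example_walk_hook(struct rib_head *rnh __unused, enum rib_walk_hook stage,
    void *arg)
{
	struct example_walk_ctx *ctx = arg;

	/* Called with the table lock held, before and after each table */
	if (stage == RIB_WALK_HOOK_POST)
		ctx->tables_done++;
}

static __unused void
example_count_all(struct example_walk_ctx *ctx)
{

	ctx->routes = 0;
	ctx->tables_done = 0;
	rib_foreach_table_walk(AF_UNSPEC, false, example_walk_one,
	    example_walk_hook, ctx);
}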

/*
 * Iterates over all existing fibs in the system and deletes each element
 *  for which the @filter_f function returns a non-zero value.
 * If @family is not AF_UNSPEC, restricts the traversal to the tables
 *  of that address family.
 */
void
rib_foreach_table_walk_del(int family, rib_filter_f_t *filter_f, void *arg)
{

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		/* Do we want some specific family? */
		if (family != AF_UNSPEC) {
			rib_walk_del(fibnum, family, filter_f, arg, 0);
			continue;
		}

		for (int i = 1; i <= AF_MAX; i++)
			rib_walk_del(fibnum, i, filter_f, arg, 0);
	}
}
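
/*
 * Example (illustrative sketch only): deleting every route whose nexthop
 * points to a given interface, similar in spirit to the cleanup done on
 * interface departure. The names are hypothetical; the filter signature
 * is assumed to match rib_filter_f_t from route_ctl.h.
 */
static int
example_filter_by_ifp(const struct rtentry *rt __unused,
    const struct nhop_object *nh, void *arg)
{

	/* Non-zero return requests deletion of the route */
	return (nh->nh_ifp == (struct ifnet *)arg);
}

static __unused void
example_flush_ifp_routes(struct ifnet *ifp)
{

	rib_foreach_table_walk_del(AF_UNSPEC, example_filter_by_ifp, ifp);
}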

/*
 * Wrapper for the control plane functions for performing af-agnostic
 *  lookups.
 * @fibnum: fib to perform the lookup in.
 * @dst: sockaddr with family and addr filled in. IPv6 addresses need to be
 *  in de-embedded form.
 * @flags: fib(9) flags.
 * @flowid: flow id for path selection in the multipath case.
 *
 * Returns nhop_object or NULL.
 *
 * Requires NET_EPOCH.
 */
struct nhop_object *
rib_lookup(uint32_t fibnum, const struct sockaddr *dst, uint32_t flags,
    uint32_t flowid)
{
	struct nhop_object *nh;

	nh = NULL;

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
	{
		const struct sockaddr_in *a = (const struct sockaddr_in *)dst;
		nh = fib4_lookup(fibnum, a->sin_addr, 0, flags, flowid);
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		const struct sockaddr_in6 *a = (const struct sockaddr_in6*)dst;
		nh = fib6_lookup(fibnum, &a->sin6_addr, a->sin6_scope_id,
		    flags, flowid);
		break;
	}
#endif
	}

	return (nh);
}
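
#ifdef INET
/*
 * Example (illustrative sketch only): resolving an IPv4 destination to a
 * nexthop via the af-agnostic rib_lookup(). The caller must be inside the
 * network epoch; the function name is hypothetical.
 */
static __unused struct nhop_object *
example_lookup_inet(uint32_t fibnum, struct in_addr dst4, uint32_t flowid)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr = dst4,
	};

	NET_EPOCH_ASSERT();
	return (rib_lookup(fibnum, (struct sockaddr *)&dst, NHR_NONE, flowid));
}
#endif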

#ifdef ROUTE_MPATH
static void
notify_add(struct rib_cmd_info *rc, const struct weightened_nhop *wn_src,
    route_notification_t *cb, void *cbdata)
{
	rc->rc_nh_new = wn_src->nh;
	rc->rc_nh_weight = wn_src->weight;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhbuf[NHOP_PRINT_BUFSIZE] __unused;
		FIB_NH_LOG(LOG_DEBUG2, wn_src->nh, "RTM_ADD for %s @ w=%u",
		    nhop_print_buf(wn_src->nh, nhbuf, sizeof(nhbuf)),
		    wn_src->weight);
	}
	cb(rc, cbdata);
}

static void
notify_del(struct rib_cmd_info *rc, const struct weightened_nhop *wn_src,
    route_notification_t *cb, void *cbdata)
{
	rc->rc_nh_old = wn_src->nh;
	rc->rc_nh_weight = wn_src->weight;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhbuf[NHOP_PRINT_BUFSIZE] __unused;
		FIB_NH_LOG(LOG_DEBUG2, wn_src->nh, "RTM_DEL for %s @ w=%u",
		    nhop_print_buf(wn_src->nh, nhbuf, sizeof(nhbuf)),
		    wn_src->weight);
	}
	cb(rc, cbdata);
}

static void
decompose_change_notification(const struct rib_cmd_info *rc, route_notification_t *cb,
    void *cbdata)
{
	uint32_t num_old, num_new;
	const struct weightened_nhop *wn_old, *wn_new;
	struct weightened_nhop tmp = { NULL, 0 };
	uint32_t idx_old = 0, idx_new = 0;

	struct rib_cmd_info rc_del = { .rc_cmd = RTM_DELETE, .rc_rt = rc->rc_rt };
	struct rib_cmd_info rc_add = { .rc_cmd = RTM_ADD, .rc_rt = rc->rc_rt };

	if (NH_IS_NHGRP(rc->rc_nh_old)) {
		wn_old = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_old);
	} else {
		tmp.nh = rc->rc_nh_old;
		tmp.weight = rc->rc_nh_weight;
		wn_old = &tmp;
		num_old = 1;
	}
	if (NH_IS_NHGRP(rc->rc_nh_new)) {
		wn_new = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_new);
	} else {
		tmp.nh = rc->rc_nh_new;
		tmp.weight = rc->rc_nh_weight;
		wn_new = &tmp;
		num_new = 1;
	}
	IF_DEBUG_LEVEL(LOG_DEBUG) {
		char buf_old[NHOP_PRINT_BUFSIZE], buf_new[NHOP_PRINT_BUFSIZE];
		nhop_print_buf_any(rc->rc_nh_old, buf_old, NHOP_PRINT_BUFSIZE);
		nhop_print_buf_any(rc->rc_nh_new, buf_new, NHOP_PRINT_BUFSIZE);
		FIB_NH_LOG(LOG_DEBUG, wn_old[0].nh, "change %s -> %s", buf_old, buf_new);
	}

	/* Use the fact that each @wn array is sorted */
	/*
	 * Here we have one (or two) multipath groups and the transition
	 *  between them needs to be reported to the caller as a series
	 *  of primitive (RTM_DELETE, RTM_ADD) operations.
	 *
	 * Leverage the fact that each nexthop group has its nexthops sorted
	 *  by their indices.
	 * [1] -> [1, 2] = A{2}
	 * [1, 2] -> [1] = D{2}
	 * [1, 2, 4] -> [1, 3, 4] = D{2}, A{3}
	 * [1, 2] -> [3, 4] = D{1}, D{2}, A{3}, A{4}
	 */
	while ((idx_old < num_old) && (idx_new < num_new)) {
		uint32_t nh_idx_old = wn_old[idx_old].nh->nh_priv->nh_idx;
		uint32_t nh_idx_new = wn_new[idx_new].nh->nh_priv->nh_idx;

		if (nh_idx_old == nh_idx_new) {
			if (wn_old[idx_old].weight != wn_new[idx_new].weight) {
				/* Update weight by providing del/add notifications */
				notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
				notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
			}
			idx_old++;
			idx_new++;
		} else if (nh_idx_old < nh_idx_new) {
			/* [1, ~2~, 4], [1, ~3~, 4] */
			notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
			idx_old++;
		} else {
			/* nh_idx_old > nh_idx_new. */
			notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
			idx_new++;
		}
	}

	while (idx_old < num_old) {
		notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
		idx_old++;
	}

	while (idx_new < num_new) {
		notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
		idx_new++;
	}
}

/*
 * Decomposes the multipath cmd info @rc into a list of add/del/change
 *  single-path operations, calling the @cb callback for each operation.
 * Assumes at least one of the nexthops in @rc is a nexthop group.
 */
void
rib_decompose_notification(const struct rib_cmd_info *rc, route_notification_t *cb,
    void *cbdata)
{
	const struct weightened_nhop *wn;
	uint32_t num_nhops;
	struct rib_cmd_info rc_new;

	rc_new = *rc;
	switch (rc->rc_cmd) {
	case RTM_ADD:
		if (!NH_IS_NHGRP(rc->rc_nh_new))
			return;
		wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_nhops);
		for (uint32_t i = 0; i < num_nhops; i++) {
			notify_add(&rc_new, &wn[i], cb, cbdata);
		}
		break;
	case RTM_DELETE:
		if (!NH_IS_NHGRP(rc->rc_nh_old))
			return;
		wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_nhops);
		for (uint32_t i = 0; i < num_nhops; i++) {
			notify_del(&rc_new, &wn[i], cb, cbdata);
		}
		break;
	case RTM_CHANGE:
		if (!NH_IS_NHGRP(rc->rc_nh_old) && !NH_IS_NHGRP(rc->rc_nh_new))
			return;
		decompose_change_notification(rc, cb, cbdata);
		break;
	}
}
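
/*
 * Example (illustrative sketch only): a notification consumer counting the
 * single-path RTM_ADD/RTM_DELETE primitives produced by
 * rib_decompose_notification(). All names below are hypothetical; the
 * callback is assumed to match route_notification_t.
 */
struct example_decompose_stats {
	uint32_t added;
	uint32_t deleted;
};

static void
example_notify_cb(struct rib_cmd_info *rc, void *arg)
{
	struct example_decompose_stats *stats = arg;

	if (rc->rc_cmd == RTM_ADD)
		stats->added++;
	else if (rc->rc_cmd == RTM_DELETE)
		stats->deleted++;
}

static __unused void
example_count_primitives(const struct rib_cmd_info *rc,
    struct example_decompose_stats *stats)
{

	stats->added = 0;
	stats->deleted = 0;
	rib_decompose_notification(rc, example_notify_cb, stats);
}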
#endif

union sockaddr_union {
	struct sockaddr		sa;
	struct sockaddr_in	sin;
	struct sockaddr_in6	sin6;
	char			_buf[32];
};

/*
 * Creates a nexthop suitable for use as a default route nhop.
 * Helper for the various kernel subsystems adding/changing the default route.
 */
int
rib_add_default_route(uint32_t fibnum, int family, struct ifnet *ifp,
    struct sockaddr *gw, struct rib_cmd_info *rc)
{
	struct route_nhop_data rnd = { .rnd_weight = RT_DEFAULT_WEIGHT };
	union sockaddr_union saun = {};
	struct sockaddr *dst = &saun.sa;
	int error;

	switch (family) {
#ifdef INET
	case AF_INET:
		saun.sin.sin_family = AF_INET;
		saun.sin.sin_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		saun.sin6.sin6_family = AF_INET6;
		saun.sin6.sin6_len = sizeof(struct sockaddr_in6);
		break;
#endif
	default:
		return (EAFNOSUPPORT);
	}

	struct ifaddr *ifa = ifaof_ifpforaddr(gw, ifp);
	if (ifa == NULL)
		return (ENOENT);

	struct nhop_object *nh = nhop_alloc(fibnum, family);
	if (nh == NULL)
		return (ENOMEM);

	nhop_set_gw(nh, gw, true);
	nhop_set_transmit_ifp(nh, ifp);
	nhop_set_src(nh, ifa);
	nhop_set_pxtype_flag(nh, NHF_DEFAULT);
	rnd.rnd_nhop = nhop_get_nhop(nh, &error);

	if (error == 0)
		error = rib_add_route_px(fibnum, dst, 0, &rnd, RTM_F_CREATE, rc);
	return (error);
}
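
#ifdef INET
/*
 * Example (illustrative sketch only): installing an IPv4 default route
 * pointing at gateway @gw4 reachable via @ifp. The function name is
 * hypothetical.
 */
static __unused int
example_set_inet_default_route(uint32_t fibnum, struct ifnet *ifp,
    struct in_addr gw4)
{
	struct sockaddr_in gw = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr = gw4,
	};
	struct rib_cmd_info rc;

	return (rib_add_default_route(fibnum, AF_INET, ifp,
	    (struct sockaddr *)&gw, &rc));
}
#endif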

#ifdef INET
/*
 * Checks if the found key in the trie contains (<=) a prefix covering
 *  @addr/@plen.
 * Returns the most specific rtentry matching the condition or NULL.
 */
static struct rtentry *
get_inet_parent_prefix(uint32_t fibnum, struct in_addr addr, int plen)
{
	struct route_nhop_data rnd;
	struct rtentry *rt;
	struct in_addr addr4;
	uint32_t scopeid;
	int parent_plen;
	struct radix_node *rn;

	rt = fib4_lookup_rt(fibnum, addr, 0, NHR_UNLOCKED, &rnd);
	if (rt == NULL)
		return (NULL);

	rt_get_inet_prefix_plen(rt, &addr4, &parent_plen, &scopeid);
	if (parent_plen <= plen)
		return (rt);

	/*
	 * There can be multiple prefixes associated with the found key:
	 * 10.0.0.0 -> 10.0.0.0/24, 10.0.0.0/23, 10.0.0.0/22, etc.
	 * All such prefixes are linked via rn_dupedkey, from most specific
	 *  to least specific. Iterate over them to check if any of these
	 *  prefixes is wide enough to cover the desired plen.
	 */
	rn = (struct radix_node *)rt;
	while ((rn = rn_nextprefix(rn)) != NULL) {
		rt = RNTORT(rn);
		rt_get_inet_prefix_plen(rt, &addr4, &parent_plen, &scopeid);
		if (parent_plen <= plen)
			return (rt);
	}

	return (NULL);
}

/*
 * Returns the most specific prefix containing (>) @addr/@plen.
 */
struct rtentry *
rt_get_inet_parent(uint32_t fibnum, struct in_addr addr, int plen)
{
	struct in_addr lookup_addr = { .s_addr = INADDR_BROADCAST };
	struct in_addr addr4 = addr;
	struct in_addr mask4;
	struct rtentry *rt;

	while (plen-- > 0) {
		/* Calculate wider mask & new key to lookup */
		mask4.s_addr = htonl(plen ? ~((1 << (32 - plen)) - 1) : 0);
		addr4.s_addr = htonl(ntohl(addr4.s_addr) & ntohl(mask4.s_addr));
		if (addr4.s_addr == lookup_addr.s_addr) {
			/* Skip lookup if the key is the same */
			continue;
		}
		lookup_addr = addr4;

		rt = get_inet_parent_prefix(fibnum, lookup_addr, plen);
		if (rt != NULL)
			return (rt);
	}

	return (NULL);
}
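
/*
 * Example (illustrative sketch only): finding the prefix length of the
 * closest covering ("parent") prefix of a host route, e.g. the /24 above
 * a /32. The function name is hypothetical; the caller is assumed to hold
 * the synchronisation required to keep the returned rtentry stable.
 */
static __unused int
example_inet_parent_plen(uint32_t fibnum, struct in_addr addr)
{
	struct rtentry *rt;
	struct in_addr paddr;
	uint32_t scopeid;
	int plen;

	rt = rt_get_inet_parent(fibnum, addr, 32);
	if (rt == NULL)
		return (-1);
	rt_get_inet_prefix_plen(rt, &paddr, &plen, &scopeid);
	return (plen);
}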
#endif

#ifdef INET6
/*
 * Checks if the found key in the trie contains (<=) a prefix covering
 *  @paddr/@plen.
 * Returns the most specific rtentry matching the condition or NULL.
 */
static struct rtentry *
get_inet6_parent_prefix(uint32_t fibnum, const struct in6_addr *paddr, int plen)
{
	struct route_nhop_data rnd;
	struct rtentry *rt;
	struct in6_addr addr6;
	uint32_t scopeid;
	int parent_plen;
	struct radix_node *rn;

	rt = fib6_lookup_rt(fibnum, paddr, 0, NHR_UNLOCKED, &rnd);
	if (rt == NULL)
		return (NULL);

	rt_get_inet6_prefix_plen(rt, &addr6, &parent_plen, &scopeid);
	if (parent_plen <= plen)
		return (rt);

	/*
	 * There can be multiple prefixes associated with the found key:
	 * 2001:db8:1::/64 -> 2001:db8:1::/56, 2001:db8:1::/48, etc.
	 * All such prefixes are linked via rn_dupedkey, from most specific
	 *  to least specific. Iterate over them to check if any of these
	 *  prefixes is wide enough to cover the desired plen.
	 */
	rn = (struct radix_node *)rt;
	while ((rn = rn_nextprefix(rn)) != NULL) {
		rt = RNTORT(rn);
		rt_get_inet6_prefix_plen(rt, &addr6, &parent_plen, &scopeid);
		if (parent_plen <= plen)
			return (rt);
	}

	return (NULL);
}

/*
 * Writes a netmask of length @mask bits into @addr6 in network byte order.
 * Note: the words beyond @mask bits are left untouched.
 */
void
ip6_writemask(struct in6_addr *addr6, uint8_t mask)
{
	uint32_t *cp;

	for (cp = (uint32_t *)addr6; mask >= 32; mask -= 32)
		*cp++ = 0xFFFFFFFF;
	if (mask > 0)
		*cp = htonl(mask ? ~((1 << (32 - mask)) - 1) : 0);
}

/*
 * Returns the most specific prefix containing (>) @paddr/@plen.
 */
struct rtentry *
rt_get_inet6_parent(uint32_t fibnum, const struct in6_addr *paddr, int plen)
{
	struct in6_addr lookup_addr = in6mask128;
	struct in6_addr addr6 = *paddr;
	struct in6_addr mask6;
	struct rtentry *rt;

	while (plen-- > 0) {
		/* Calculate wider mask & new key to lookup */
		ip6_writemask(&mask6, plen);
		IN6_MASK_ADDR(&addr6, &mask6);
		if (IN6_ARE_ADDR_EQUAL(&addr6, &lookup_addr)) {
			/* Skip lookup if the key is the same */
			continue;
		}
		lookup_addr = addr6;

		rt = get_inet6_parent_prefix(fibnum, &lookup_addr, plen);
		if (rt != NULL)
			return (rt);
	}

	return (NULL);
}
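
/*
 * Example (illustrative sketch only): the IPv6 counterpart of the sketch
 * above, finding the prefix length of the closest covering prefix of a
 * /128 host route. The function name is hypothetical; the caller is
 * assumed to hold the synchronisation required to keep the returned
 * rtentry stable.
 */
static __unused int
example_inet6_parent_plen(uint32_t fibnum, const struct in6_addr *addr)
{
	struct rtentry *rt;
	struct in6_addr paddr;
	uint32_t scopeid;
	int plen;

	rt = rt_get_inet6_parent(fibnum, addr, 128);
	if (rt == NULL)
		return (-1);
	rt_get_inet6_prefix_plen(rt, &paddr, &plen, &scopeid);
	return (plen);
}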
#endif

/*
 * Prints rtentry @rt data in the provided @buf.
 * Example: rt/192.168.0.0/24
 */
char *
rt_print_buf(const struct rtentry *rt, char *buf, size_t bufsize)
{
#if defined(INET) || defined(INET6)
	char abuf[INET6_ADDRSTRLEN];
	uint32_t scopeid;
	int plen;
#endif

	switch (rt_get_family(rt)) {
#ifdef INET
	case AF_INET:
		{
			struct in_addr addr4;
			rt_get_inet_prefix_plen(rt, &addr4, &plen, &scopeid);
			inet_ntop(AF_INET, &addr4, abuf, sizeof(abuf));
			snprintf(buf, bufsize, "rt/%s/%d", abuf, plen);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct in6_addr addr6;
			rt_get_inet6_prefix_plen(rt, &addr6, &plen, &scopeid);
			inet_ntop(AF_INET6, &addr6, abuf, sizeof(abuf));
			snprintf(buf, bufsize, "rt/%s/%d", abuf, plen);
		}
		break;
#endif
	default:
		snprintf(buf, bufsize, "rt/unknown_af#%d", rt_get_family(rt));
		break;
	}

	return (buf);
}
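
/*
 * Example (illustrative sketch only): formatting a route for a log line
 * with rt_print_buf(). The function name and buffer size are arbitrary.
 */
static __unused void
example_log_rt(const struct rtentry *rt)
{
	char rtbuf[64];

	/* Produces e.g. "rt/192.0.2.0/24" */
	printf("examining %s\n", rt_print_buf(rt, rtbuf, sizeof(rtbuf)));
}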

const char *
rib_print_cmd(int rib_cmd)
{
	switch (rib_cmd) {
	case RTM_ADD:
		return ("RTM_ADD");
	case RTM_CHANGE:
		return ("RTM_CHANGE");
	case RTM_DELETE:
		return ("RTM_DELETE");
	case RTM_GET:
		return ("RTM_GET");
	}

	return ("UNKNOWN");
}