1 /*	$OpenBSD: route.c,v 1.359 2017/06/09 12:56:43 mpi Exp $	*/
2 /*	$NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)route.c	8.2 (Berkeley) 11/15/93
62  */
63 
64 /*
65  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
66  *
67  * NRL grants permission for redistribution and use in source and binary
68  * forms, with or without modification, of the software and documentation
69  * created at NRL provided that the following conditions are met:
70  *
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce the above copyright
74  *    notice, this list of conditions and the following disclaimer in the
75  *    documentation and/or other materials provided with the distribution.
76  * 3. All advertising materials mentioning features or use of this software
77  *    must display the following acknowledgements:
78  *	This product includes software developed by the University of
79  *	California, Berkeley and its contributors.
80  *	This product includes software developed at the Information
81  *	Technology Division, US Naval Research Laboratory.
82  * 4. Neither the name of the NRL nor the names of its contributors
83  *    may be used to endorse or promote products derived from this software
84  *    without specific prior written permission.
85  *
86  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
90  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  *
98  * The views and conclusions contained in the software and documentation
99  * are those of the authors and should not be interpreted as representing
100  * official policies, either expressed or implied, of the US Naval
101  * Research Laboratory (NRL).
102  */
103 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/mbuf.h>
107 #include <sys/socket.h>
108 #include <sys/socketvar.h>
109 #include <sys/timeout.h>
110 #include <sys/domain.h>
111 #include <sys/protosw.h>
112 #include <sys/ioctl.h>
113 #include <sys/kernel.h>
114 #include <sys/queue.h>
115 #include <sys/pool.h>
116 #include <sys/atomic.h>
117 
118 #include <net/if.h>
119 #include <net/if_var.h>
120 #include <net/if_dl.h>
121 #include <net/route.h>
122 
123 #include <netinet/in.h>
124 #include <netinet/ip_var.h>
125 #include <netinet/in_var.h>
126 
127 #ifdef INET6
128 #include <netinet/ip6.h>
129 #include <netinet6/ip6_var.h>
130 #include <netinet6/in6_var.h>
131 #endif
132 
133 #ifdef MPLS
134 #include <netmpls/mpls.h>
135 #endif
136 
137 #ifdef IPSEC
138 #include <netinet/ip_ipsp.h>
139 #include <net/if_enc.h>
140 #endif
141 
142 #ifdef BFD
143 #include <net/bfd.h>
144 #endif
145 
146 #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
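/*
 * ROUNDUP() rounds a sockaddr length up to the next multiple of
 * sizeof(long) and maps 0 to sizeof(long); e.g. with 8-byte longs
 * ROUNDUP(5) == 8, ROUNDUP(9) == 16 and ROUNDUP(0) == 8.  It is used
 * below to size the copies of gateway sockaddrs.
 */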
147 
148 /* Give some jitter to hash, to avoid synchronization between routers. */
149 static uint32_t		rt_hashjitter;
150 
151 extern unsigned int	rtmap_limit;
152 
153 struct cpumem *		rtcounters;
154 int			rttrash;	/* routes not in table but not freed */
155 int			ifatrash;	/* ifas not in ifp list but not free */
156 
157 struct pool		rtentry_pool;	/* pool for rtentry structures */
158 struct pool		rttimer_pool;	/* pool for rttimer structures */
159 
160 void	rt_timer_init(void);
161 int	rt_setgwroute(struct rtentry *, u_int);
162 void	rt_putgwroute(struct rtentry *);
163 int	rtflushclone1(struct rtentry *, void *, u_int);
164 void	rtflushclone(unsigned int, struct rtentry *);
165 int	rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
166 struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
167 struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);
168 
169 struct	ifaddr *ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *,
170 		    u_int);
171 
172 #ifdef DDB
173 void	db_print_sa(struct sockaddr *);
174 void	db_print_ifa(struct ifaddr *);
175 int	db_show_rtentry(struct rtentry *, void *, unsigned int);
176 #endif
177 
178 #define	LABELID_MAX	50000
179 
180 struct rt_label {
181 	TAILQ_ENTRY(rt_label)	rtl_entry;
182 	char			rtl_name[RTLABEL_LEN];
183 	u_int16_t		rtl_id;
184 	int			rtl_ref;
185 };
186 
187 TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
188 
189 void
190 route_init(void)
191 {
192 	rtcounters = counters_alloc(rts_ncounters);
193 
194 	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_SOFTNET, 0,
195 	    "rtentry", NULL);
196 
197 	while (rt_hashjitter == 0)
198 		rt_hashjitter = arc4random();
199 
200 #ifdef BFD
201 	bfdinit();
202 #endif
203 }
204 
205 /*
206  * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
207  */
208 int
209 rtisvalid(struct rtentry *rt)
210 {
211 	if (rt == NULL)
212 		return (0);
213 
214 	if (!ISSET(rt->rt_flags, RTF_UP))
215 		return (0);
216 
217 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
218 		KASSERT(rt->rt_gwroute != NULL);
219 		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
220 		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
221 			return (0);
222 	}
223 
224 	return (1);
225 }
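
/*
 * Intended usage, as a rough sketch rather than a verbatim caller:
 * code that caches an rtentry (a protocol route cache, for instance)
 * is expected to revalidate it before each use and re-lookup when the
 * check fails:
 *
 *	if (!rtisvalid(ro->ro_rt)) {
 *		rtfree(ro->ro_rt);
 *		ro->ro_rt = rtalloc(&ro->ro_dst, RT_RESOLVE, tableid);
 *	}
 *
 * ``ro'' and ``tableid'' are illustrative names only.
 */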
226 
227 /*
228  * Do the actual lookup for rtalloc(9), do not use directly!
229  *
230  * Return the best matching entry for the destination ``dst''.
231  *
232  * "RT_RESOLVE" means that a corresponding L2 entry should
233  *   be added to the routing table and resolved (via ARP or
234  *   NDP), if it does not exist.
235  */
236 struct rtentry *
237 rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
238 {
239 	struct rtentry		*rt0, *rt = NULL;
240 	int			 error = 0;
241 
242 	NET_ASSERT_LOCKED();
243 
244 	rt = rtable_match(tableid, dst, src);
245 	if (rt != NULL) {
246 		if ((rt->rt_flags & RTF_CLONING) && ISSET(flags, RT_RESOLVE)) {
247 			struct rt_addrinfo	 info;
248 
249 			rt0 = rt;
250 
251 			memset(&info, 0, sizeof(info));
252 			info.rti_info[RTAX_DST] = dst;
253 
254 			KERNEL_LOCK();
255 			/*
256 			 * The priority of a cloned route should be different
257 			 * to avoid conflict with /32 cloning routes.
258 			 *
259 			 * It should also be higher to let the ARP layer find
260 			 * cloned routes instead of the cloning one.
261 			 */
262 			error = rtrequest(RTM_RESOLVE, &info,
263 			    rt->rt_priority - 1, &rt, tableid);
264 			if (error) {
265 				rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0,
266 				    error, tableid);
267 			} else {
268 				/* Inform listeners of the new route */
269 				rtm_send(rt, RTM_ADD, tableid);
270 				rtfree(rt0);
271 			}
272 			KERNEL_UNLOCK();
273 		}
274 		rt->rt_use++;
275 	} else
276 		rtstat_inc(rts_unreach);
277 	return (rt);
278 }
279 
280 #ifndef SMALL_KERNEL
281 /*
282  * Originated from bridge_hash() in if_bridge.c
283  */
284 #define mix(a, b, c) do {						\
285 	a -= b; a -= c; a ^= (c >> 13);					\
286 	b -= c; b -= a; b ^= (a << 8);					\
287 	c -= a; c -= b; c ^= (b >> 13);					\
288 	a -= b; a -= c; a ^= (c >> 12);					\
289 	b -= c; b -= a; b ^= (a << 16);					\
290 	c -= a; c -= b; c ^= (b >> 5);					\
291 	a -= b; a -= c; a ^= (c >> 3);					\
292 	b -= c; b -= a; b ^= (a << 10);					\
293 	c -= a; c -= b; c ^= (b >> 15);					\
294 } while (0)
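
/*
 * mix() appears to be the 96-bit mixing step of Bob Jenkins' lookup2
 * hash, as used by bridge_hash(); rt_hash() below feeds it the
 * destination address words, the optional source address words and
 * the per-boot rt_hashjitter.
 */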
295 
296 int
297 rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
298 {
299 	uint32_t a, b, c;
300 
301 	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
302 		return (-1);
303 
304 	a = b = 0x9e3779b9;
305 	c = rt_hashjitter;
306 
307 	switch (dst->sa_family) {
308 	case AF_INET:
309 	    {
310 		struct sockaddr_in *sin;
311 
312 		if (!ipmultipath)
313 			return (-1);
314 
315 		sin = satosin(dst);
316 		a += sin->sin_addr.s_addr;
317 		b += (src != NULL) ? src[0] : 0;
318 		mix(a, b, c);
319 		break;
320 	    }
321 #ifdef INET6
322 	case AF_INET6:
323 	    {
324 		struct sockaddr_in6 *sin6;
325 
326 		if (!ip6_multipath)
327 			return (-1);
328 
329 		sin6 = satosin6(dst);
330 		a += sin6->sin6_addr.s6_addr32[0];
331 		b += sin6->sin6_addr.s6_addr32[2];
332 		c += (src != NULL) ? src[0] : 0;
333 		mix(a, b, c);
334 		a += sin6->sin6_addr.s6_addr32[1];
335 		b += sin6->sin6_addr.s6_addr32[3];
336 		c += (src != NULL) ? src[1] : 0;
337 		mix(a, b, c);
338 		a += sin6->sin6_addr.s6_addr32[2];
339 		b += sin6->sin6_addr.s6_addr32[1];
340 		c += (src != NULL) ? src[2] : 0;
341 		mix(a, b, c);
342 		a += sin6->sin6_addr.s6_addr32[3];
343 		b += sin6->sin6_addr.s6_addr32[0];
344 		c += (src != NULL) ? src[3] : 0;
345 		mix(a, b, c);
346 		break;
347 	    }
348 #endif /* INET6 */
349 	}
350 
351 	return (c & 0xffff);
352 }
353 
354 /*
355  * Allocate a route, potentially using multipath to select the peer.
356  */
357 struct rtentry *
358 rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
359 {
360 	return (rt_match(dst, src, RT_RESOLVE, rtableid));
361 }
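
/*
 * ``src'', when given, points at the raw 32-bit words of the source
 * address so rt_hash() can spread flows across RTF_MPATH routes.  An
 * IPv4 output path caller would look roughly like this (a sketch
 * using the struct route field names):
 *
 *	ro->ro_rt = rtalloc_mpath(&ro->ro_dst,
 *	    &ip->ip_src.s_addr, ro->ro_tableid);
 */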
362 #endif /* SMALL_KERNEL */
363 
364 /*
365  * Look in the routing table for the best matching entry for
366  * ``dst''.
367  *
368  * If a route with a gateway is found and its next hop is no
369  * longer valid, try to cache it.
370  */
371 struct rtentry *
372 rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
373 {
374 	return (rt_match(dst, NULL, flags, rtableid));
375 }
376 
377 /*
378  * Cache the route entry corresponding to a reachable next hop in
379  * the gateway entry ``rt''.
380  */
381 int
382 rt_setgwroute(struct rtentry *rt, u_int rtableid)
383 {
384 	struct rtentry *nhrt;
385 
386 	NET_ASSERT_LOCKED();
387 
388 	KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));
389 
390 	/* If we cannot find a valid next hop, bail. */
391 	nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rtable_l2(rtableid));
392 	if (nhrt == NULL)
393 		return (ENOENT);
394 
395 	/* Next hop entry must be on the same interface. */
396 	if (nhrt->rt_ifidx != rt->rt_ifidx) {
397 		rtfree(nhrt);
398 		return (EHOSTUNREACH);
399 	}
400 
401 	/*
402 	 * The next hop must be directly reachable; this also prevents
403 	 * rtentry loops, for example when rt->rt_gwroute points to rt.
404 	 */
405 	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) {
406 		rtfree(nhrt);
407 		return (ENETUNREACH);
408 	}
409 
410 	/* Next hop is valid so remove possible old cache. */
411 	rt_putgwroute(rt);
412 	KASSERT(rt->rt_gwroute == NULL);
413 
414 	/*
415 	 * If the MTU of next hop is 0, this will reset the MTU of the
416 	 * route to run PMTUD again from scratch.
417 	 */
418 	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu))
419 		rt->rt_mtu = nhrt->rt_mtu;
420 
421 	/*
422 	 * To avoid reference counting problems when writing link-layer
423 	 * addresses in an outgoing packet, we ensure that the lifetime
424 	 * of a cached entry is greater than the longest lifetime of the
425 	 * gateway entries that point to it.
426 	 */
427 	nhrt->rt_flags |= RTF_CACHED;
428 	nhrt->rt_cachecnt++;
429 
430 	rt->rt_gwroute = nhrt;
431 
432 	return (0);
433 }
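
/*
 * Illustration of the resulting chain: for a default route added with
 * "route add default 192.0.2.1", rt_gwroute ends up pointing at the
 * (possibly freshly cloned) ARP entry for 192.0.2.1, so the output
 * path can fetch the link-layer address without a second lookup.  The
 * RTF_CACHED flag and rt_cachecnt keep that entry alive for as long
 * as gateway routes reference it.
 */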
434 
435 /*
436  * Invalidate the cached route entry of the gateway entry ``rt''.
437  */
438 void
439 rt_putgwroute(struct rtentry *rt)
440 {
441 	struct rtentry *nhrt = rt->rt_gwroute;
442 
443 	NET_ASSERT_LOCKED();
444 
445 	if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL)
446 		return;
447 
448 	KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
449 	KASSERT(nhrt->rt_cachecnt > 0);
450 
451 	--nhrt->rt_cachecnt;
452 	if (nhrt->rt_cachecnt == 0)
453 		nhrt->rt_flags &= ~RTF_CACHED;
454 
455 	rtfree(rt->rt_gwroute);
456 	rt->rt_gwroute = NULL;
457 }
458 
459 void
460 rtref(struct rtentry *rt)
461 {
462 	atomic_inc_int(&rt->rt_refcnt);
463 }
464 
465 void
466 rtfree(struct rtentry *rt)
467 {
468 	int		 refcnt;
469 
470 	if (rt == NULL)
471 		return;
472 
473 	refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt);
474 	if (refcnt <= 0) {
475 		KASSERT(!ISSET(rt->rt_flags, RTF_UP));
476 		KASSERT(!RT_ROOT(rt));
477 		atomic_dec_int(&rttrash);
478 		if (refcnt < 0) {
479 			printf("rtfree: %p not freed (neg refs)\n", rt);
480 			return;
481 		}
482 
483 		KERNEL_LOCK();
484 		rt_timer_remove_all(rt);
485 		ifafree(rt->rt_ifa);
486 		rtlabel_unref(rt->rt_labelid);
487 #ifdef MPLS
488 		if (rt->rt_flags & RTF_MPLS)
489 			free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls));
490 #endif
491 		free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len));
492 		free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len);
493 		KERNEL_UNLOCK();
494 
495 		pool_put(&rtentry_pool, rt);
496 	}
497 }
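
/*
 * Reference counting contract: every rtentry returned by rtalloc(9),
 * rtable_lookup() or rtrequest() holds a reference for the caller,
 * and each such reference must eventually be dropped with rtfree().
 * The entry itself is only reclaimed once the last reference is gone
 * and it has already been removed from the table (RTF_UP cleared).
 */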
498 
499 void
500 ifafree(struct ifaddr *ifa)
501 {
502 	if (ifa == NULL)
503 		panic("ifafree");
504 	if (ifa->ifa_refcnt == 0) {
505 		ifatrash--;
506 		free(ifa, M_IFADDR, 0);
507 	} else
508 		ifa->ifa_refcnt--;
509 }
510 
511 /*
512  * Force a routing table entry to the specified
513  * destination to go through the given gateway.
514  * Normally called as a result of a routing redirect
515  * message from the network layer.
516  */
517 void
518 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
519     struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
520 {
521 	struct rtentry		*rt;
522 	int			 error = 0;
523 	enum rtstat_counters	 stat = rts_ncounters;
524 	struct rt_addrinfo	 info;
525 	struct ifaddr		*ifa;
526 	unsigned int		 ifidx = 0;
527 	int			 flags = RTF_GATEWAY|RTF_HOST;
528 	uint8_t			 prio = RTP_NONE;
529 
530 	NET_ASSERT_LOCKED();
531 
532 	/* verify the gateway is directly reachable */
533 	rt = rtalloc(gateway, 0, rdomain);
534 	if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) {
535 		rtfree(rt);
536 		error = ENETUNREACH;
537 		goto out;
538 	}
539 	ifidx = rt->rt_ifidx;
540 	ifa = rt->rt_ifa;
541 	rtfree(rt);
542 	rt = NULL;
543 
544 	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY);
545 	/*
546 	 * If the redirect isn't from our current router for this dst,
547 	 * it's either old or wrong.  If it redirects us to ourselves,
548 	 * we have a routing loop, perhaps as a result of an interface
549 	 * going down recently.
550 	 */
551 #define	equal(a1, a2) \
552 	((a1)->sa_len == (a2)->sa_len && \
553 	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
554 	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
555 		error = EINVAL;
556 	else if (ifa_ifwithaddr(gateway, rdomain) != NULL ||
557 	    (gateway->sa_family == AF_INET &&
558 	    in_broadcast(satosin(gateway)->sin_addr, rdomain)))
559 		error = EHOSTUNREACH;
560 	if (error)
561 		goto done;
562 	/*
563 	 * Create a new entry if we just got back a wildcard entry
564 	 * or the lookup failed.  This is necessary for hosts
565 	 * which use routing redirects generated by smart gateways
566 	 * to dynamically build the routing tables.
567 	 */
568 	if (rt == NULL)
569 		goto create;
570 	/*
571 	 * Don't listen to the redirect if it's
572 	 * for a route to an interface.
573 	 */
574 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
575 		if (!ISSET(rt->rt_flags, RTF_HOST)) {
576 			/*
577 			 * Changing from route to net => route to host.
578 			 * Create new route, rather than smashing route to net.
579 			 */
580 create:
581 			rtfree(rt);
582 			flags |= RTF_DYNAMIC;
583 			bzero(&info, sizeof(info));
584 			info.rti_info[RTAX_DST] = dst;
585 			info.rti_info[RTAX_GATEWAY] = gateway;
586 			info.rti_ifa = ifa;
587 			info.rti_flags = flags;
588 			rt = NULL;
589 			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt,
590 			    rdomain);
591 			if (error == 0) {
592 				flags = rt->rt_flags;
593 				prio = rt->rt_priority;
594 			}
595 			stat = rts_dynamic;
596 		} else {
597 			/*
598 			 * Smash the current notion of the gateway to
599 			 * this destination.  Should check about netmask!!!
600 			 */
601 			rt->rt_flags |= RTF_MODIFIED;
602 			flags |= RTF_MODIFIED;
603 			prio = rt->rt_priority;
604 			stat = rts_newgateway;
605 			rt_setgate(rt, gateway, rdomain);
606 		}
607 	} else
608 		error = EHOSTUNREACH;
609 done:
610 	if (rt) {
611 		if (rtp && !error)
612 			*rtp = rt;
613 		else
614 			rtfree(rt);
615 	}
616 out:
617 	if (error)
618 		rtstat_inc(rts_badredirect);
619 	else if (stat != rts_ncounters)
620 		rtstat_inc(stat);
621 	bzero((caddr_t)&info, sizeof(info));
622 	info.rti_info[RTAX_DST] = dst;
623 	info.rti_info[RTAX_GATEWAY] = gateway;
624 	info.rti_info[RTAX_AUTHOR] = src;
625 	KERNEL_LOCK();
626 	rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain);
627 	KERNEL_UNLOCK();
628 }
629 
630 /*
631  * Delete a route and generate a message
632  */
633 int
634 rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
635 {
636 	int			error;
637 	struct rt_addrinfo	info;
638 	unsigned int		ifidx;
639 	struct sockaddr_in6	sa_mask;
640 
641 	KASSERT(rt->rt_ifidx == ifp->if_index);
642 
643 	/*
644 	 * Request the new route so that the entry is not actually
645 	 * deleted.  That will allow the information being reported to
646 	 * be accurate (and consistent with route_output()).
647 	 */
648 	bzero((caddr_t)&info, sizeof(info));
649 	info.rti_info[RTAX_DST] = rt_key(rt);
650 	if (!ISSET(rt->rt_flags, RTF_HOST))
651 		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
652 	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
653 	info.rti_flags = rt->rt_flags;
654 	ifidx = rt->rt_ifidx;
655 	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
656 	KERNEL_LOCK();
657 	rtm_miss(RTM_DELETE, &info, info.rti_flags, rt->rt_priority, ifidx,
658 	    error, tableid);
659 	KERNEL_UNLOCK();
660 	if (error == 0)
661 		rtfree(rt);
662 	return (error);
663 }
664 
665 static inline int
666 rtequal(struct rtentry *a, struct rtentry *b)
667 {
668 	if (a == b)
669 		return 1;
670 
671 	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
672 	    rt_plen(a) == rt_plen(b))
673 		return 1;
674 	else
675 		return 0;
676 }
677 
678 int
679 rtflushclone1(struct rtentry *rt, void *arg, u_int id)
680 {
681 	struct rtentry *parent = arg;
682 	struct ifnet *ifp;
683 	int error;
684 
685 	ifp = if_get(rt->rt_ifidx);
686 
687 	/*
688 	 * This happens when an interface with an RTF_CLONING route is
689 	 * being detached.  In this case it's safe to bail because all
690 	 * the routes are being purged by rt_ifa_purge().
691 	 */
692 	if (ifp == NULL)
693 	        return 0;
694 
695 	if (ISSET(rt->rt_flags, RTF_CLONED) && rtequal(rt->rt_parent, parent)) {
696 	        error = rtdeletemsg(rt, ifp, id);
697 	        if (error == 0)
698 			error = EAGAIN;
699 	} else
700 		error = 0;
701 
702 	if_put(ifp);
703 	return error;
704 }
705 
706 void
707 rtflushclone(unsigned int rtableid, struct rtentry *parent)
708 {
709 
710 #ifdef DIAGNOSTIC
711 	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
712 		panic("rtflushclone: called with a non-cloning route");
713 #endif
714 	rtable_walk(rtableid, rt_key(parent)->sa_family, rtflushclone1, parent);
715 }
716 
717 struct ifaddr *
718 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway,
719     u_int rtableid)
720 {
721 	struct ifaddr	*ifa;
722 
723 	if ((flags & RTF_GATEWAY) == 0) {
724 		/*
725 		 * If we are adding a route to an interface,
726 		 * and the interface is a pt to pt link
727 		 * we should search for the destination
728 		 * as our clue to the interface.  Otherwise
729 		 * we can use the local address.
730 		 */
731 		ifa = NULL;
732 		if (flags & RTF_HOST)
733 			ifa = ifa_ifwithdstaddr(dst, rtableid);
734 		if (ifa == NULL)
735 			ifa = ifa_ifwithaddr(gateway, rtableid);
736 	} else {
737 		/*
738 		 * If we are adding a route to a remote net
739 		 * or host, the gateway may still be on the
740 		 * other end of a pt to pt link.
741 		 */
742 		ifa = ifa_ifwithdstaddr(gateway, rtableid);
743 	}
744 	if (ifa == NULL) {
745 		if (gateway->sa_family == AF_LINK) {
746 			struct sockaddr_dl *sdl = satosdl(gateway);
747 			struct ifnet *ifp = if_get(sdl->sdl_index);
748 
749 			if (ifp != NULL)
750 				ifa = ifaof_ifpforaddr(dst, ifp);
751 			if_put(ifp);
752 		} else {
753 			struct rtentry *rt;
754 
755 			rt = rtalloc(gateway, RT_RESOLVE, rtable_l2(rtableid));
756 			if (rt != NULL)
757 				ifa = rt->rt_ifa;
758 			rtfree(rt);
759 		}
760 	}
761 	if (ifa == NULL)
762 		return (NULL);
763 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
764 		struct ifaddr	*oifa = ifa;
765 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
766 		if (ifa == NULL)
767 			ifa = oifa;
768 	}
769 	return (ifa);
770 }
771 
772 int
773 rt_getifa(struct rt_addrinfo *info, u_int rtid)
774 {
775 	struct ifnet	*ifp = NULL;
776 
777 	/*
778 	 * ifp may be specified by sockaddr_dl when protocol address
779 	 * is ambiguous
780 	 */
781 	if (info->rti_info[RTAX_IFP] != NULL) {
782 		struct sockaddr_dl *sdl;
783 
784 		sdl = satosdl(info->rti_info[RTAX_IFP]);
785 		ifp = if_get(sdl->sdl_index);
786 	}
787 
788 #ifdef IPSEC
789 	/*
790 	 * If the destination is a PF_KEY address, we'll look
791 	 * for the existence of an encap interface number or address
792 	 * in the options list of the gateway. By default, we'll return
793 	 * enc0.
794 	 */
795 	if (info->rti_info[RTAX_DST] &&
796 	    info->rti_info[RTAX_DST]->sa_family == PF_KEY)
797 		info->rti_ifa = enc_getifa(rtid, 0);
798 #endif
799 
800 	if (info->rti_ifa == NULL && info->rti_info[RTAX_IFA] != NULL)
801 		info->rti_ifa = ifa_ifwithaddr(info->rti_info[RTAX_IFA], rtid);
802 
803 	if (info->rti_ifa == NULL) {
804 		struct sockaddr	*sa;
805 
806 		if ((sa = info->rti_info[RTAX_IFA]) == NULL)
807 			if ((sa = info->rti_info[RTAX_GATEWAY]) == NULL)
808 				sa = info->rti_info[RTAX_DST];
809 
810 		if (sa != NULL && ifp != NULL)
811 			info->rti_ifa = ifaof_ifpforaddr(sa, ifp);
812 		else if (info->rti_info[RTAX_DST] != NULL &&
813 		    info->rti_info[RTAX_GATEWAY] != NULL)
814 			info->rti_ifa = ifa_ifwithroute(info->rti_flags,
815 			    info->rti_info[RTAX_DST],
816 			    info->rti_info[RTAX_GATEWAY],
817 			    rtid);
818 		else if (sa != NULL)
819 			info->rti_ifa = ifa_ifwithroute(info->rti_flags,
820 			    sa, sa, rtid);
821 	}
822 
823 	if_put(ifp);
824 
825 	if (info->rti_ifa == NULL)
826 		return (ENETUNREACH);
827 
828 	return (0);
829 }
830 
831 int
832 rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
833     struct rtentry **ret_nrt, u_int tableid)
834 {
835 	struct rtentry	*rt;
836 	int		 error;
837 
838 	NET_ASSERT_LOCKED();
839 
840 	if (!rtable_exists(tableid))
841 		return (EAFNOSUPPORT);
842 	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST],
843 	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio);
844 	if (rt == NULL)
845 		return (ESRCH);
846 
847 	/* Make sure that's the route the caller wants to delete. */
848 	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) {
849 		rtfree(rt);
850 		return (ESRCH);
851 	}
852 
853 #ifndef SMALL_KERNEL
854 	/*
855 	 * If we got multipath routes, we require users to specify
856 	 * a matching gateway.
857 	 */
858 	if ((rt->rt_flags & RTF_MPATH) &&
859 	    info->rti_info[RTAX_GATEWAY] == NULL) {
860 		rtfree(rt);
861 		return (ESRCH);
862 	}
863 #endif
864 
865 #ifdef BFD
866 	if (ISSET(rt->rt_flags, RTF_BFD))
867 		bfdclear(rt);
868 #endif
869 
870 	error = rtable_delete(tableid, info->rti_info[RTAX_DST],
871 	    info->rti_info[RTAX_NETMASK], rt);
872 	if (error != 0) {
873 		rtfree(rt);
874 		return (ESRCH);
875 	}
876 
877 	/* Release next hop cache before flushing cloned entries. */
878 	rt_putgwroute(rt);
879 
880 	/* Clean up any cloned children. */
881 	if (ISSET(rt->rt_flags, RTF_CLONING))
882 		rtflushclone(tableid, rt);
883 
884 	rtfree(rt->rt_parent);
885 	rt->rt_parent = NULL;
886 
887 	rt->rt_flags &= ~RTF_UP;
888 
889 	KASSERT(ifp->if_index == rt->rt_ifidx);
890 	ifp->if_rtrequest(ifp, RTM_DELETE, rt);
891 
892 	atomic_inc_int(&rttrash);
893 
894 	if (ret_nrt != NULL)
895 		*ret_nrt = rt;
896 	else
897 		rtfree(rt);
898 
899 	return (0);
900 }
901 
902 int
903 rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
904     struct rtentry **ret_nrt, u_int tableid)
905 {
906 	struct ifnet		*ifp;
907 	struct rtentry		*rt, *crt;
908 	struct ifaddr		*ifa;
909 	struct sockaddr		*ndst;
910 	struct sockaddr_rtlabel	*sa_rl, sa_rl2;
911 	struct sockaddr_dl	 sa_dl = { sizeof(sa_dl), AF_LINK };
912 	int			 dlen, error;
913 #ifdef MPLS
914 	struct sockaddr_mpls	*sa_mpls;
915 #endif
916 
917 	NET_ASSERT_LOCKED();
918 
919 	if (!rtable_exists(tableid))
920 		return (EAFNOSUPPORT);
921 	if (info->rti_flags & RTF_HOST)
922 		info->rti_info[RTAX_NETMASK] = NULL;
923 	switch (req) {
924 	case RTM_DELETE:
925 		return (EINVAL);
926 
927 	case RTM_RESOLVE:
928 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
929 			return (EINVAL);
930 		if ((rt->rt_flags & RTF_CLONING) == 0)
931 			return (EINVAL);
932 		KASSERT(rt->rt_ifa->ifa_ifp != NULL);
933 		info->rti_ifa = rt->rt_ifa;
934 		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST);
935 		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC);
936 		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl);
937 		info->rti_info[RTAX_LABEL] =
938 		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
939 		/* FALLTHROUGH */
940 
941 	case RTM_ADD:
942 		if (info->rti_ifa == NULL && (error = rt_getifa(info, tableid)))
943 			return (error);
944 		ifa = info->rti_ifa;
945 		ifp = ifa->ifa_ifp;
946 		if (prio == 0)
947 			prio = ifp->if_priority + RTP_STATIC;
948 
949 		dlen = info->rti_info[RTAX_DST]->sa_len;
950 		ndst = malloc(dlen, M_RTABLE, M_NOWAIT);
951 		if (ndst == NULL)
952 			return (ENOBUFS);
953 
954 		if (info->rti_info[RTAX_NETMASK] != NULL)
955 			rt_maskedcopy(info->rti_info[RTAX_DST], ndst,
956 			    info->rti_info[RTAX_NETMASK]);
957 		else
958 			memcpy(ndst, info->rti_info[RTAX_DST], dlen);
959 
960 		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO);
961 		if (rt == NULL) {
962 			free(ndst, M_RTABLE, dlen);
963 			return (ENOBUFS);
964 		}
965 
966 		rt->rt_refcnt = 1;
967 		rt->rt_flags = info->rti_flags | RTF_UP;
968 		rt->rt_priority = prio;	/* init routing priority */
969 		LIST_INIT(&rt->rt_timer);
970 
971 #ifndef SMALL_KERNEL
972 		/* Check the link state if the table supports it. */
973 		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
974 		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
975 		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
976 		    !ISSET(ifp->if_flags, IFF_UP))) {
977 			rt->rt_flags &= ~RTF_UP;
978 			rt->rt_priority |= RTP_DOWN;
979 		}
980 #endif
981 
982 		if (info->rti_info[RTAX_LABEL] != NULL) {
983 			sa_rl = (struct sockaddr_rtlabel *)
984 			    info->rti_info[RTAX_LABEL];
985 			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
986 		}
987 
988 #ifdef MPLS
989 		/* We have to allocate additional space for MPLS info */
990 		if (info->rti_flags & RTF_MPLS &&
991 		    (info->rti_info[RTAX_SRC] != NULL ||
992 		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
993 			struct rt_mpls *rt_mpls;
994 
995 			sa_mpls = (struct sockaddr_mpls *)
996 			    info->rti_info[RTAX_SRC];
997 
998 			rt->rt_llinfo = malloc(sizeof(struct rt_mpls),
999 			    M_TEMP, M_NOWAIT|M_ZERO);
1000 
1001 			if (rt->rt_llinfo == NULL) {
1002 				free(ndst, M_RTABLE, dlen);
1003 				pool_put(&rtentry_pool, rt);
1004 				return (ENOMEM);
1005 			}
1006 
1007 			rt_mpls = (struct rt_mpls *)rt->rt_llinfo;
1008 
1009 			if (sa_mpls != NULL)
1010 				rt_mpls->mpls_label = sa_mpls->smpls_label;
1011 
1012 			rt_mpls->mpls_operation = info->rti_mpls;
1013 
1014 			/* XXX: set experimental bits */
1015 
1016 			rt->rt_flags |= RTF_MPLS;
1017 		} else
1018 			rt->rt_flags &= ~RTF_MPLS;
1019 #endif
1020 
1021 		ifa->ifa_refcnt++;
1022 		rt->rt_ifa = ifa;
1023 		rt->rt_ifidx = ifp->if_index;
1024 		/*
1025 		 * Copy metrics and a back pointer from the cloned
1026 		 * route's parent.
1027 		 */
1028 		if (ISSET(rt->rt_flags, RTF_CLONED)) {
1029 			rtref(*ret_nrt);
1030 			rt->rt_parent = *ret_nrt;
1031 			rt->rt_rmx = (*ret_nrt)->rt_rmx;
1032 		}
1033 
1034 		/*
1035 		 * We must set rt->rt_gateway before adding ``rt'' to
1036 		 * the routing table because the radix MPATH code uses
1037 		 * it to (re)order routes.
1038 		 */
1039 		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY],
1040 		    tableid))) {
1041 			ifafree(ifa);
1042 			rtfree(rt->rt_parent);
1043 			rt_putgwroute(rt);
1044 			free(rt->rt_gateway, M_RTABLE, 0);
1045 			free(ndst, M_RTABLE, dlen);
1046 			pool_put(&rtentry_pool, rt);
1047 			return (error);
1048 		}
1049 
1050 		error = rtable_insert(tableid, ndst,
1051 		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY],
1052 		    rt->rt_priority, rt);
1053 		if (error != 0 &&
1054 		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) {
1055 			/* overwrite cloned route */
1056 			if (ISSET(crt->rt_flags, RTF_CLONED)) {
1057 				struct ifnet *cifp;
1058 
1059 				cifp = if_get(crt->rt_ifidx);
1060 				KASSERT(cifp != NULL);
1061 				rtdeletemsg(crt, cifp, tableid);
1062 				if_put(cifp);
1063 
1064 				error = rtable_insert(tableid, ndst,
1065 				    info->rti_info[RTAX_NETMASK],
1066 				    info->rti_info[RTAX_GATEWAY],
1067 				    rt->rt_priority, rt);
1068 			}
1069 			rtfree(crt);
1070 		}
1071 		if (error != 0) {
1072 			ifafree(ifa);
1073 			rtfree(rt->rt_parent);
1074 			rt_putgwroute(rt);
1075 			free(rt->rt_gateway, M_RTABLE, 0);
1076 			free(ndst, M_RTABLE, dlen);
1077 			pool_put(&rtentry_pool, rt);
1078 			return (EEXIST);
1079 		}
1080 		ifp->if_rtrequest(ifp, req, rt);
1081 
1082 		if_group_routechange(info->rti_info[RTAX_DST],
1083 			info->rti_info[RTAX_NETMASK]);
1084 
1085 		if (ret_nrt != NULL)
1086 			*ret_nrt = rt;
1087 		else
1088 			rtfree(rt);
1089 		break;
1090 	}
1091 
1092 	return (0);
1093 }
1094 
1095 int
1096 rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
1097 {
1098 	int glen = ROUNDUP(gate->sa_len);
1099 	struct sockaddr *sa;
1100 
1101 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1102 		sa = malloc(glen, M_RTABLE, M_NOWAIT);
1103 		if (sa == NULL)
1104 			return (ENOBUFS);
1105 		free(rt->rt_gateway, M_RTABLE, 0);
1106 		rt->rt_gateway = sa;
1107 	}
1108 	memmove(rt->rt_gateway, gate, glen);
1109 
1110 	if (ISSET(rt->rt_flags, RTF_GATEWAY))
1111 		return (rt_setgwroute(rt, rtableid));
1112 
1113 	return (0);
1114 }
1115 
1116 /*
1117  * Return the route entry containing the next hop link-layer
1118  * address corresponding to ``rt''.
1119  */
1120 struct rtentry *
1121 rt_getll(struct rtentry *rt)
1122 {
1123 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
1124 		KASSERT(rt->rt_gwroute != NULL);
1125 		return (rt->rt_gwroute);
1126 	}
1127 
1128 	return (rt);
1129 }
1130 
1131 void
1132 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
1133     struct sockaddr *netmask)
1134 {
1135 	u_char	*cp1 = (u_char *)src;
1136 	u_char	*cp2 = (u_char *)dst;
1137 	u_char	*cp3 = (u_char *)netmask;
1138 	u_char	*cplim = cp2 + *cp3;
1139 	u_char	*cplim2 = cp2 + *cp1;
1140 
1141 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1142 	cp3 += 2;
1143 	if (cplim > cplim2)
1144 		cplim = cplim2;
1145 	while (cp2 < cplim)
1146 		*cp2++ = *cp1++ & *cp3++;
1147 	if (cp2 < cplim2)
1148 		bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
1149 }
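
/*
 * The copy keeps sa_len and sa_family from ``src'', ANDs the following
 * bytes with ``netmask'' for as many bytes as the (possibly truncated)
 * mask covers and zero-fills the rest; e.g. masking the sockaddr_in
 * for 192.0.2.123 with 255.255.255.0 yields the network key 192.0.2.0.
 */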
1150 
1151 int
1152 rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst)
1153 {
1154 	struct ifnet		*ifp = ifa->ifa_ifp;
1155 	struct rtentry		*rt;
1156 	struct sockaddr_rtlabel	 sa_rl;
1157 	struct rt_addrinfo	 info;
1158 	unsigned int		 rtableid = ifp->if_rdomain;
1159 	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
1160 	int			 error;
1161 
1162 	memset(&info, 0, sizeof(info));
1163 	info.rti_ifa = ifa;
1164 	info.rti_flags = flags | RTF_MPATH;
1165 	info.rti_info[RTAX_DST] = dst;
1166 	if (flags & RTF_LLINFO)
1167 		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl);
1168 	else
1169 		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1170 	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);
1171 
1172 #ifdef MPLS
1173 	if ((flags & RTF_MPLS) == RTF_MPLS)
1174 		info.rti_mpls = MPLS_OP_POP;
1175 #endif /* MPLS */
1176 
1177 	if ((flags & RTF_HOST) == 0)
1178 		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
1179 
1180 	if (flags & (RTF_LOCAL|RTF_BROADCAST))
1181 		prio = RTP_LOCAL;
1182 
1183 	if (flags & RTF_CONNECTED)
1184 		prio = ifp->if_priority + RTP_CONNECTED;
1185 
1186 	error = rtrequest(RTM_ADD, &info, prio, &rt, rtableid);
1187 	if (error == 0) {
1188 		/*
1189 		 * A local route is created for every address configured
1190 		 * on an interface, so use this information to notify
1191 		 * userland that a new address has been added.
1192 		 */
1193 		if (flags & RTF_LOCAL)
1194 			rtm_addr(rt, RTM_NEWADDR, ifa);
1195 		rtm_send(rt, RTM_ADD, rtableid);
1196 		rtfree(rt);
1197 	}
1198 	return (error);
1199 }
1200 
1201 int
1202 rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst)
1203 {
1204 	struct ifnet		*ifp = ifa->ifa_ifp;
1205 	struct rtentry		*rt;
1206 	struct mbuf		*m = NULL;
1207 	struct sockaddr		*deldst;
1208 	struct rt_addrinfo	 info;
1209 	struct sockaddr_rtlabel	 sa_rl;
1210 	unsigned int		 rtableid = ifp->if_rdomain;
1211 	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
1212 	int			 error;
1213 
1214 #ifdef MPLS
1215 	if ((flags & RTF_MPLS) == RTF_MPLS)
1216 		/* MPLS routes only exist in rdomain 0 */
1217 		rtableid = 0;
1218 #endif /* MPLS */
1219 
1220 	if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
1221 		m = m_get(M_DONTWAIT, MT_SONAME);
1222 		if (m == NULL)
1223 			return (ENOBUFS);
1224 		deldst = mtod(m, struct sockaddr *);
1225 		rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
1226 		dst = deldst;
1227 	}
1228 
1229 	memset(&info, 0, sizeof(info));
1230 	info.rti_ifa = ifa;
1231 	info.rti_flags = flags;
1232 	info.rti_info[RTAX_DST] = dst;
1233 	if ((flags & RTF_LLINFO) == 0)
1234 		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1235 	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);
1236 
1237 	if ((flags & RTF_HOST) == 0)
1238 		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
1239 
1240 	if (flags & (RTF_LOCAL|RTF_BROADCAST))
1241 		prio = RTP_LOCAL;
1242 
1243 	if (flags & RTF_CONNECTED)
1244 		prio = ifp->if_priority + RTP_CONNECTED;
1245 
1246 	error = rtrequest_delete(&info, prio, ifp, &rt, rtableid);
1247 	if (error == 0) {
1248 		rtm_send(rt, RTM_DELETE, rtableid);
1249 		if (flags & RTF_LOCAL)
1250 			rtm_addr(rt, RTM_DELADDR, ifa);
1251 		rtfree(rt);
1252 	}
1253 	m_free(m);
1254 
1255 	return (error);
1256 }
1257 
1258 /*
1259  * Add ifa's address as a local rtentry.
1260  */
1261 int
1262 rt_ifa_addlocal(struct ifaddr *ifa)
1263 {
1264 	struct rtentry *rt;
1265 	u_int flags = RTF_HOST|RTF_LOCAL;
1266 	int error = 0;
1267 
1268 	/*
1269 	 * If the configured address corresponds to the magical "any"
1270 	 * address do not add a local route entry because that might
1271 	 * corrupt the routing tree which uses this value for the
1272 	 * default routes.
1273 	 */
1274 	switch (ifa->ifa_addr->sa_family) {
1275 	case AF_INET:
1276 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1277 			return (0);
1278 		break;
1279 #ifdef INET6
1280 	case AF_INET6:
1281 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1282 		    &in6addr_any))
1283 			return (0);
1284 		break;
1285 #endif
1286 	default:
1287 		break;
1288 	}
1289 
1290 	if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1291 		flags |= RTF_LLINFO;
1292 
1293 	/* If there is no local entry, allocate one. */
1294 	rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain);
1295 	if (rt == NULL || ISSET(rt->rt_flags, flags) != flags)
1296 		error = rt_ifa_add(ifa, flags, ifa->ifa_addr);
1297 	rtfree(rt);
1298 
1299 	return (error);
1300 }
1301 
1302 /*
1303  * Remove the local rtentry of ifa's address if it exists.
1304  */
1305 int
1306 rt_ifa_dellocal(struct ifaddr *ifa)
1307 {
1308 	struct rtentry *rt;
1309 	u_int flags = RTF_HOST|RTF_LOCAL;
1310 	int error = 0;
1311 
1312 	/*
1313 	 * We do not add local routes for such addresses, so do not bother
1314 	 * removing them.
1315 	 */
1316 	switch (ifa->ifa_addr->sa_family) {
1317 	case AF_INET:
1318 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1319 			return (0);
1320 		break;
1321 #ifdef INET6
1322 	case AF_INET6:
1323 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1324 		    &in6addr_any))
1325 			return (0);
1326 		break;
1327 #endif
1328 	default:
1329 		break;
1330 	}
1331 
1332 	if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1333 		flags |= RTF_LLINFO;
1334 
1335 	/*
1336 	 * Before deleting, check whether a corresponding local host
1337 	 * route really exists.  With this check, we avoid deleting an
1338 	 * interface direct route whose destination is the same as the
1339 	 * address being removed.  This can happen when removing
1340 	 * a subnet-router anycast address on an interface attached
1341 	 * to a shared medium.
1342 	 */
1343 	rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain);
1344 	if (rt != NULL && ISSET(rt->rt_flags, flags) == flags)
1345 		error = rt_ifa_del(ifa, flags, ifa->ifa_addr);
1346 	rtfree(rt);
1347 
1348 	return (error);
1349 }
1350 
1351 /*
1352  * Remove all routes attached to ``ifa''.
1353  */
1354 void
1355 rt_ifa_purge(struct ifaddr *ifa)
1356 {
1357 	struct ifnet		*ifp = ifa->ifa_ifp;
1358 	unsigned int		 rtableid;
1359 	int			 i;
1360 
1361 	KASSERT(ifp != NULL);
1362 
1363 	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
1364 		/* skip rtables that are not in the rdomain of the ifp */
1365 		if (rtable_l2(rtableid) != ifp->if_rdomain)
1366 			continue;
1367 		for (i = 1; i <= AF_MAX; i++) {
1368 			rtable_walk(rtableid, i, rt_ifa_purge_walker, ifa);
1369 		}
1370 	}
1371 }
1372 
1373 int
1374 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid)
1375 {
1376 	struct ifaddr		*ifa = vifa;
1377 	struct ifnet		*ifp = ifa->ifa_ifp;
1378 	int			 error;
1379 
1380 	if (rt->rt_ifa != ifa)
1381 		return (0);
1382 
1383 	if ((error = rtdeletemsg(rt, ifp, rtableid))) {
1384 		return (error);
1385 	}
1386 
1387 	return (EAGAIN);
1388 
1389 }
1390 
1391 /*
1392  * Route timer routines.  These routines allow functions to be called
1393  * for various routes at any time.  This is useful in supporting
1394  * path MTU discovery and redirect route deletion.
1395  *
1396  * This is similar to some BSDI internal functions, but it provides
1397  * for multiple queues for efficiency's sake...
1398  */
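
/*
 * Typical usage, sketched rather than copied from a real caller: a
 * protocol creates a queue once with its timeout in seconds and then
 * attaches timers to individual routes; when a timer fires, rtt_func
 * runs, or, if it is NULL, the route is simply deleted via
 * rtdeletemsg():
 *
 *	q = rt_timer_queue_create(20 * 60);
 *	...
 *	rt_timer_add(rt, NULL, q, rtableid);
 */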
1399 
1400 LIST_HEAD(, rttimer_queue)	rttimer_queue_head;
1401 static int			rt_init_done = 0;
1402 
1403 #define RTTIMER_CALLOUT(r)	{					\
1404 	if (r->rtt_func != NULL) {					\
1405 		(*r->rtt_func)(r->rtt_rt, r);				\
1406 	} else {							\
1407 		struct ifnet *ifp;					\
1408 									\
1409 		ifp = if_get(r->rtt_rt->rt_ifidx);			\
1410 		if (ifp != NULL) 					\
1411 			rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid);	\
1412 		if_put(ifp);						\
1413 	}								\
1414 }
1415 
1416 /*
1417  * Some subtle order problems with domain initialization mean that
1418  * we cannot count on this being run from rt_init before various
1419  * protocol initializations are done.  Therefore, we make sure
1420  * that this is run when the first queue is added...
1421  */
1422 
1423 void
1424 rt_timer_init(void)
1425 {
1426 	static struct timeout	rt_timer_timeout;
1427 
1428 	if (rt_init_done)
1429 		panic("rt_timer_init: already initialized");
1430 
1431 	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, IPL_SOFTNET, 0,
1432 	    "rttmr", NULL);
1433 
1434 	LIST_INIT(&rttimer_queue_head);
1435 	timeout_set_proc(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout);
1436 	timeout_add_sec(&rt_timer_timeout, 1);
1437 	rt_init_done = 1;
1438 }
1439 
1440 struct rttimer_queue *
1441 rt_timer_queue_create(u_int timeout)
1442 {
1443 	struct rttimer_queue	*rtq;
1444 
1445 	if (rt_init_done == 0)
1446 		rt_timer_init();
1447 
1448 	if ((rtq = malloc(sizeof(*rtq), M_RTABLE, M_NOWAIT|M_ZERO)) == NULL)
1449 		return (NULL);
1450 
1451 	rtq->rtq_timeout = timeout;
1452 	rtq->rtq_count = 0;
1453 	TAILQ_INIT(&rtq->rtq_head);
1454 	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1455 
1456 	return (rtq);
1457 }
1458 
1459 void
1460 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
1461 {
1462 	rtq->rtq_timeout = timeout;
1463 }
1464 
1465 void
1466 rt_timer_queue_destroy(struct rttimer_queue *rtq)
1467 {
1468 	struct rttimer	*r;
1469 
1470 	NET_ASSERT_LOCKED();
1471 
1472 	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
1473 		LIST_REMOVE(r, rtt_link);
1474 		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1475 		RTTIMER_CALLOUT(r);
1476 		pool_put(&rttimer_pool, r);
1477 		if (rtq->rtq_count > 0)
1478 			rtq->rtq_count--;
1479 		else
1480 			printf("rt_timer_queue_destroy: rtq_count reached 0\n");
1481 	}
1482 
1483 	LIST_REMOVE(rtq, rtq_link);
1484 	free(rtq, M_RTABLE, sizeof(*rtq));
1485 }
1486 
1487 unsigned long
1488 rt_timer_queue_count(struct rttimer_queue *rtq)
1489 {
1490 	return (rtq->rtq_count);
1491 }
1492 
1493 void
1494 rt_timer_remove_all(struct rtentry *rt)
1495 {
1496 	struct rttimer	*r;
1497 
1498 	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
1499 		LIST_REMOVE(r, rtt_link);
1500 		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1501 		if (r->rtt_queue->rtq_count > 0)
1502 			r->rtt_queue->rtq_count--;
1503 		else
1504 			printf("rt_timer_remove_all: rtq_count reached 0\n");
1505 		pool_put(&rttimer_pool, r);
1506 	}
1507 }
1508 
1509 int
1510 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *,
1511     struct rttimer *), struct rttimer_queue *queue, u_int rtableid)
1512 {
1513 	struct rttimer	*r;
1514 	long		 current_time;
1515 
1516 	current_time = time_uptime;
1517 	rt->rt_expire = time_uptime + queue->rtq_timeout;
1518 
1519 	/*
1520 	 * If there's already a timer with this action, destroy it before
1521 	 * we add a new one.
1522 	 */
1523 	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1524 		if (r->rtt_func == func) {
1525 			LIST_REMOVE(r, rtt_link);
1526 			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1527 			if (r->rtt_queue->rtq_count > 0)
1528 				r->rtt_queue->rtq_count--;
1529 			else
1530 				printf("rt_timer_add: rtq_count reached 0\n");
1531 			pool_put(&rttimer_pool, r);
1532 			break;  /* only one per list, so we can quit... */
1533 		}
1534 	}
1535 
1536 	r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
1537 	if (r == NULL)
1538 		return (ENOBUFS);
1539 
1540 	r->rtt_rt = rt;
1541 	r->rtt_time = current_time;
1542 	r->rtt_func = func;
1543 	r->rtt_queue = queue;
1544 	r->rtt_tableid = rtableid;
1545 	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
1546 	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
1547 	r->rtt_queue->rtq_count++;
1548 
1549 	return (0);
1550 }
1551 
1552 void
1553 rt_timer_timer(void *arg)
1554 {
1555 	struct timeout		*to = (struct timeout *)arg;
1556 	struct rttimer_queue	*rtq;
1557 	struct rttimer		*r;
1558 	long			 current_time;
1559 	int			 s;
1560 
1561 	current_time = time_uptime;
1562 
1563 	NET_LOCK(s);
1564 	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
1565 		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
1566 		    (r->rtt_time + rtq->rtq_timeout) < current_time) {
1567 			LIST_REMOVE(r, rtt_link);
1568 			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1569 			RTTIMER_CALLOUT(r);
1570 			pool_put(&rttimer_pool, r);
1571 			if (rtq->rtq_count > 0)
1572 				rtq->rtq_count--;
1573 			else
1574 				printf("rt_timer_timer: rtq_count reached 0\n");
1575 		}
1576 	}
1577 	NET_UNLOCK(s);
1578 
1579 	timeout_add_sec(to, 1);
1580 }
1581 
1582 u_int16_t
1583 rtlabel_name2id(char *name)
1584 {
1585 	struct rt_label		*label, *p;
1586 	u_int16_t		 new_id = 1;
1587 
1588 	if (!name[0])
1589 		return (0);
1590 
1591 	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1592 		if (strcmp(name, label->rtl_name) == 0) {
1593 			label->rtl_ref++;
1594 			return (label->rtl_id);
1595 		}
1596 
1597 	/*
1598 	 * to avoid fragmentation, we do a linear search from the beginning
1599 	 * and take the first free slot we find. if there is none or the list
1600 	 * is empty, append a new entry at the end.
1601 	 */
1602 	TAILQ_FOREACH(p, &rt_labels, rtl_entry) {
1603 		if (p->rtl_id != new_id)
1604 			break;
1605 		new_id = p->rtl_id + 1;
1606 	}
1607 	if (new_id > LABELID_MAX)
1608 		return (0);
1609 
1610 	label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
1611 	if (label == NULL)
1612 		return (0);
1613 	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
1614 	label->rtl_id = new_id;
1615 	label->rtl_ref++;
1616 
1617 	if (p != NULL)	/* insert new entry before p */
1618 		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
1619 	else		/* either list empty or no free slot in between */
1620 		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);
1621 
1622 	return (label->rtl_id);
1623 }
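
/*
 * For example (with a hypothetical label name): the first call to
 * rtlabel_name2id("uplink") allocates id 1 with one reference, later
 * calls with the same name bump rtl_ref and return the same id, and
 * each rtlabel_unref(1) drops a reference until the entry is freed.
 */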
1624 
1625 const char *
1626 rtlabel_id2name(u_int16_t id)
1627 {
1628 	struct rt_label	*label;
1629 
1630 	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1631 		if (label->rtl_id == id)
1632 			return (label->rtl_name);
1633 
1634 	return (NULL);
1635 }
1636 
1637 struct sockaddr *
1638 rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
1639 {
1640 	const char	*label;
1641 
1642 	if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
1643 		return (NULL);
1644 
1645 	bzero(sa_rl, sizeof(*sa_rl));
1646 	sa_rl->sr_len = sizeof(*sa_rl);
1647 	sa_rl->sr_family = AF_UNSPEC;
1648 	strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));
1649 
1650 	return ((struct sockaddr *)sa_rl);
1651 }
1652 
1653 void
1654 rtlabel_unref(u_int16_t id)
1655 {
1656 	struct rt_label	*p, *next;
1657 
1658 	if (id == 0)
1659 		return;
1660 
1661 	TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) {
1662 		if (id == p->rtl_id) {
1663 			if (--p->rtl_ref == 0) {
1664 				TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1665 				free(p, M_RTABLE, sizeof(*p));
1666 			}
1667 			break;
1668 		}
1669 	}
1670 }
1671 
1672 #ifndef SMALL_KERNEL
1673 void
1674 rt_if_track(struct ifnet *ifp)
1675 {
1676 	int i;
1677 	u_int tid;
1678 
1679 	for (tid = 0; tid < rtmap_limit; tid++) {
1680 		/* skip rtables that are not in the rdomain of the ifp */
1681 		if (rtable_l2(tid) != ifp->if_rdomain)
1682 			continue;
1683 		for (i = 1; i <= AF_MAX; i++) {
1684 			if (!rtable_mpath_capable(tid, i))
1685 				continue;
1686 
1687 			rtable_walk(tid, i, rt_if_linkstate_change, ifp);
1688 		}
1689 	}
1690 }
1691 
1692 int
1693 rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
1694 {
1695 	struct ifnet *ifp = arg;
1696 	struct sockaddr_in6 sa_mask;
1697 
1698 	if (rt->rt_ifidx != ifp->if_index)
1699 		return (0);
1700 
1701 	/* Local routes are always usable. */
1702 	if (rt->rt_flags & RTF_LOCAL) {
1703 		rt->rt_flags |= RTF_UP;
1704 		return (0);
1705 	}
1706 
1707 	if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
1708 		if (!(rt->rt_flags & RTF_UP)) {
1709 			/* bring route up */
1710 			rt->rt_flags |= RTF_UP;
1711 			rtable_mpath_reprio(id, rt_key(rt),
1712 			    rt_plen2mask(rt, &sa_mask),
1713 			    rt->rt_priority & RTP_MASK, rt);
1714 		}
1715 	} else {
1716 		if (rt->rt_flags & RTF_UP) {
1717 			/*
1718 			 * Remove redirected and cloned routes (mainly ARP)
1719 			 * from down interfaces so we have a chance to get
1720 			 * new routes from a better source.
1721 			 */
1722 			if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
1723 			    !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) {
1724 				int error;
1725 
1726 				if ((error = rtdeletemsg(rt, ifp, id)))
1727 					return (error);
1728 				return (EAGAIN);
1729 			}
1730 			/* take route down */
1731 			rt->rt_flags &= ~RTF_UP;
1732 			rtable_mpath_reprio(id, rt_key(rt),
1733 			    rt_plen2mask(rt, &sa_mask),
1734 			    rt->rt_priority | RTP_DOWN, rt);
1735 		}
1736 	}
1737 	if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));
1738 
1739 	return (0);
1740 }
1741 #endif
1742 
1743 struct sockaddr *
1744 rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
1745 {
1746 	struct sockaddr_in	*sin = (struct sockaddr_in *)sa_mask;
1747 #ifdef INET6
1748 	struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)sa_mask;
1749 #endif
1750 
1751 	KASSERT(plen >= 0 || plen == -1);
1752 
1753 	if (plen == -1)
1754 		return (NULL);
1755 
1756 	memset(sa_mask, 0, sizeof(*sa_mask));
1757 
1758 	switch (af) {
1759 	case AF_INET:
1760 		sin->sin_family = AF_INET;
1761 		sin->sin_len = sizeof(struct sockaddr_in);
1762 		in_prefixlen2mask(&sin->sin_addr, plen);
1763 		break;
1764 #ifdef INET6
1765 	case AF_INET6:
1766 		sin6->sin6_family = AF_INET6;
1767 		sin6->sin6_len = sizeof(struct sockaddr_in6);
1768 		in6_prefixlen2mask(&sin6->sin6_addr, plen);
1769 		break;
1770 #endif /* INET6 */
1771 	default:
1772 		return (NULL);
1773 	}
1774 
1775 	return ((struct sockaddr *)sa_mask);
1776 }
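
/*
 * E.g. rt_plentosa(AF_INET, 24, &mask) fills ``mask'' with a
 * sockaddr_in holding 255.255.255.0, while plen == -1 (host route,
 * no mask to build) and unsupported families yield NULL.
 */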
1777 
1778 struct sockaddr *
1779 rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
1780 {
1781 #ifndef ART
1782 	return (rt_mask(rt));
1783 #else
1784 	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
1785 #endif /* ART */
1786 }
1787 
1788 #ifdef DDB
1789 #include <machine/db_machdep.h>
1790 #include <ddb/db_output.h>
1791 
1792 void
1793 db_print_sa(struct sockaddr *sa)
1794 {
1795 	int len;
1796 	u_char *p;
1797 
1798 	if (sa == NULL) {
1799 		db_printf("[NULL]");
1800 		return;
1801 	}
1802 
1803 	p = (u_char *)sa;
1804 	len = sa->sa_len;
1805 	db_printf("[");
1806 	while (len > 0) {
1807 		db_printf("%d", *p);
1808 		p++;
1809 		len--;
1810 		if (len)
1811 			db_printf(",");
1812 	}
1813 	db_printf("]\n");
1814 }
1815 
1816 void
1817 db_print_ifa(struct ifaddr *ifa)
1818 {
1819 	if (ifa == NULL)
1820 		return;
1821 	db_printf("  ifa_addr=");
1822 	db_print_sa(ifa->ifa_addr);
1823 	db_printf("  ifa_dsta=");
1824 	db_print_sa(ifa->ifa_dstaddr);
1825 	db_printf("  ifa_mask=");
1826 	db_print_sa(ifa->ifa_netmask);
1827 	db_printf("  flags=0x%x, refcnt=%d, metric=%d\n",
1828 	    ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric);
1829 }
1830 
1831 /*
1832  * Function to pass to rtable_walk().
1833  * Return non-zero error to abort walk.
1834  */
1835 int
1836 db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
1837 {
1838 	db_printf("rtentry=%p", rt);
1839 
1840 	db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n",
1841 	    rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id);
1842 
1843 	db_printf(" key="); db_print_sa(rt_key(rt));
1844 	db_printf(" plen=%d", rt_plen(rt));
1845 	db_printf(" gw="); db_print_sa(rt->rt_gateway);
1846 	db_printf(" ifidx=%u ", rt->rt_ifidx);
1847 	db_printf(" ifa=%p\n", rt->rt_ifa);
1848 	db_print_ifa(rt->rt_ifa);
1849 
1850 	db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo);
1851 	return (0);
1852 }
1853 
1854 /*
1855  * Function to print all the route trees.
1856  * Use this from ddb:  "call db_show_arptab"
1857  */
1858 int
1859 db_show_arptab(void)
1860 {
1861 	db_printf("Route tree for AF_INET\n");
1862 	rtable_walk(0, AF_INET, db_show_rtentry, NULL);
1863 	return (0);
1864 }
1865 #endif /* DDB */
1866