xref: /dragonfly/sys/net/route.c (revision a563ca70)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)route.c	8.3 (Berkeley) 1/9/95
66  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
67  * $DragonFly: src/sys/net/route.c,v 1.41 2008/11/09 10:50:15 sephe Exp $
68  */
69 
70 #include "opt_inet.h"
71 #include "opt_mpls.h"
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/domain.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/globaldata.h>
82 #include <sys/thread.h>
83 
84 #include <net/if.h>
85 #include <net/route.h>
86 #include <net/netisr.h>
87 
88 #include <netinet/in.h>
89 #include <net/ip_mroute/ip_mroute.h>
90 
91 #include <sys/thread2.h>
92 #include <sys/msgport2.h>
93 #include <net/netmsg2.h>
94 
95 #ifdef MPLS
96 #include <netproto/mpls/mpls.h>
97 #endif
98 
99 static struct rtstatistics rtstatistics_percpu[MAXCPU];
100 #ifdef SMP
101 #define rtstat	rtstatistics_percpu[mycpuid]
102 #else
103 #define rtstat	rtstatistics_percpu[0]
104 #endif
105 
106 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
107 struct lwkt_port *rt_ports[MAXCPU];
108 
109 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
110 			       struct sockaddr *);
111 static void rtable_init(void);
112 static void rtable_service_loop(void *dummy);
113 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
114 				      struct rtentry *, void *);
115 
116 #ifdef SMP
117 static void rtredirect_msghandler(netmsg_t msg);
118 static void rtrequest1_msghandler(netmsg_t msg);
119 #endif
120 static void rtsearch_msghandler(netmsg_t msg);
121 static void rtmask_add_msghandler(netmsg_t msg);
122 
123 static int rt_setshims(struct rtentry *, struct sockaddr **);
124 
125 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
126 
127 #ifdef ROUTE_DEBUG
128 static int route_debug = 1;
129 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
130            &route_debug, 0, "");
131 #endif
132 
133 int route_assert_owner_access = 0;
134 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
135            &route_assert_owner_access, 0, "");
136 
137 /*
138  * Initialize the route table(s) for protocol domains and
139  * create a helper thread which will be responsible for updating
140  * route table entries on each cpu.
141  */
142 void
143 route_init(void)
144 {
145 	int cpu;
146 	thread_t rtd;
147 
148 	for (cpu = 0; cpu < ncpus; ++cpu)
149 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
150 	rn_init();      /* initialize all zeroes, all ones, mask table */
151 	rtable_init();	/* call dom_rtattach() on each cpu */
152 
153 	for (cpu = 0; cpu < ncpus; cpu++) {
154 		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
155 			    0, cpu, "rtable_cpu %d", cpu);
156 		rt_ports[cpu] = &rtd->td_msgport;
157 	}
158 }
159 
160 static void
161 rtable_init_oncpu(netmsg_t msg)
162 {
163 	struct domain *dom;
164 	int cpu = mycpuid;
165 
166 	SLIST_FOREACH(dom, &domains, dom_next) {
167 		if (dom->dom_rtattach) {
168 			dom->dom_rtattach(
169 				(void **)&rt_tables[cpu][dom->dom_family],
170 			        dom->dom_rtoffset);
171 		}
172 	}
173 	ifnet_forwardmsg(&msg->lmsg, cpu + 1);
174 }
175 
176 static void
177 rtable_init(void)
178 {
179 	struct netmsg_base msg;
180 
181 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
182 	ifnet_domsg(&msg.lmsg, 0);
183 }
184 
185 /*
186  * Our per-cpu table management protocol thread.  All route table operations
187  * are sequentially chained through all cpus starting at cpu #0 in order to
188  * maintain duplicate route tables on each cpu.  Having a separate route
189  * table management thread allows the protocol and interrupt threads to
190  * issue route table changes.
191  */
192 static void
193 rtable_service_loop(void *dummy __unused)
194 {
195 	netmsg_base_t msg;
196 	thread_t td = curthread;
197 
198 	while ((msg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
199 		msg->nm_dispatch((netmsg_t)msg);
200 	}
201 }
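
/*
 * Editorial sketch (not part of the original source): a route table update
 * is replicated across cpus by sending a netmsg to the cpu0 rtable thread
 * and letting each per-cpu handler forward it along the chain, roughly:
 *
 *	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, handler);
 *	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
 *
 *	static void
 *	handler(netmsg_t msg)
 *	{
 *		... apply the change to this cpu's table ...
 *		if (mycpuid + 1 < ncpus)
 *			lwkt_forwardmsg(rtable_portfn(mycpuid + 1), &msg->lmsg);
 *		else
 *			lwkt_replymsg(&msg->lmsg, error);
 *	}
 *
 * rtredirect_msghandler(), rtrequest1_msghandler() and rtsearch_msghandler()
 * below all follow this pattern; "handler" here is only a placeholder name.
 */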
202 
203 /*
204  * Routing statistics.
205  */
206 #ifdef SMP
207 static int
208 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
209 {
210 	int cpu, error = 0;
211 
212 	for (cpu = 0; cpu < ncpus; ++cpu) {
213 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
214 					sizeof(struct rtstatistics))))
215 				break;
216 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
217 					sizeof(struct rtstatistics))))
218 				break;
219 	}
220 
221 	return (error);
222 }
223 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
224 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
225 #else
226 SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
227 "Routing statistics");
228 #endif
229 
230 /*
231  * Packet routing routines.
232  */
233 
234 /*
235  * Look up and fill in the "ro_rt" rtentry field in a route structure given
236  * an address in the "ro_dst" field.  Always send a report on a miss and
237  * always clone routes.
238  */
239 void
240 rtalloc(struct route *ro)
241 {
242 	rtalloc_ign(ro, 0UL);
243 }
244 
245 /*
246  * Look up and fill in the "ro_rt" rtentry field in a route structure given
247  * an address in the "ro_dst" field.  Always send a report on a miss and
248  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
249  * ignored.
250  */
251 void
252 rtalloc_ign(struct route *ro, u_long ignoreflags)
253 {
254 	if (ro->ro_rt != NULL) {
255 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
256 			return;
257 		rtfree(ro->ro_rt);
258 		ro->ro_rt = NULL;
259 	}
260 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
261 }
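
/*
 * Example usage (editorial sketch, not part of the original source): a
 * caller typically embeds a struct route, fills in ro_dst and lets
 * rtalloc()/rtalloc_ign() resolve ro_rt, releasing the reference with
 * rtfree() when done:
 *
 *	struct route ro;
 *
 *	bzero(&ro, sizeof(ro));
 *	... fill in ro.ro_dst with the destination sockaddr ...
 *	rtalloc_ign(&ro, RTF_PRCLONING);	(don't clone protocol routes)
 *	if (ro.ro_rt == NULL)
 *		return (EHOSTUNREACH);
 *	... use ro.ro_rt ...
 *	rtfree(ro.ro_rt);
 *	ro.ro_rt = NULL;
 */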
262 
263 /*
264  * Look up the route that matches the given "dst" address.
265  *
266  * Route lookup can have the side-effect of creating and returning
267  * a cloned route instead when "dst" matches a cloning route and the
268  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
269  *
270  * Any route returned has its reference count incremented.
271  */
272 struct rtentry *
273 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
274 {
275 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
276 	struct rtentry *rt;
277 
278 	if (rnh == NULL)
279 		goto unreach;
280 
281 	/*
282 	 * Look up route in the radix tree.
283 	 */
284 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
285 	if (rt == NULL)
286 		goto unreach;
287 
288 	/*
289 	 * Handle cloning routes.
290 	 */
291 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
292 		struct rtentry *clonedroute;
293 		int error;
294 
295 		clonedroute = rt;	/* copy in/copy out parameter */
296 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
297 				  &clonedroute);	/* clone the route */
298 		if (error != 0) {	/* cloning failed */
299 			if (generate_report)
300 				rt_dstmsg(RTM_MISS, dst, error);
301 			rt->rt_refcnt++;
302 			return (rt);	/* return the uncloned route */
303 		}
304 		if (generate_report) {
305 			if (clonedroute->rt_flags & RTF_XRESOLVE)
306 				rt_dstmsg(RTM_RESOLVE, dst, 0);
307 			else
308 				rt_rtmsg(RTM_ADD, clonedroute,
309 					 clonedroute->rt_ifp, 0);
310 		}
311 		return (clonedroute);	/* return cloned route */
312 	}
313 
314 	/*
315 	 * Increment the reference count of the matched route and return.
316 	 */
317 	rt->rt_refcnt++;
318 	return (rt);
319 
320 unreach:
321 	rtstat.rts_unreach++;
322 	if (generate_report)
323 		rt_dstmsg(RTM_MISS, dst, 0);
324 	return (NULL);
325 }
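
/*
 * Editorial note (assumption): rtlookup() and rtpurelookup(), used
 * throughout this file, appear to be thin wrappers around _rtlookup()
 * declared in net/route.h, differing only in whether RTL_REPORTMSG or
 * RTL_DONTREPORT is passed.  In all cases a successful lookup hands the
 * caller a referenced route, which must eventually be released with
 * rtfree() (or have rt_refcnt decremented directly where the code knows
 * the route remains referenced elsewhere, as several callers below do).
 */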
326 
327 void
328 rtfree(struct rtentry *rt)
329 {
330 	if (rt->rt_cpuid == mycpuid)
331 		rtfree_oncpu(rt);
332 	else
333 		rtfree_remote(rt, 1);
334 }
335 
336 void
337 rtfree_oncpu(struct rtentry *rt)
338 {
339 	KKASSERT(rt->rt_cpuid == mycpuid);
340 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
341 
342 	--rt->rt_refcnt;
343 	if (rt->rt_refcnt == 0) {
344 		struct radix_node_head *rnh =
345 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
346 
347 		if (rnh->rnh_close)
348 			rnh->rnh_close((struct radix_node *)rt, rnh);
349 		if (!(rt->rt_flags & RTF_UP)) {
350 			/* deallocate route */
351 			if (rt->rt_ifa != NULL)
352 				IFAFREE(rt->rt_ifa);
353 			if (rt->rt_parent != NULL)
354 				RTFREE(rt->rt_parent);	/* recursive call! */
355 			Free(rt_key(rt));
356 			Free(rt);
357 		}
358 	}
359 }
360 
361 static void
362 rtfree_remote_dispatch(netmsg_t msg)
363 {
364 	struct lwkt_msg *lmsg = &msg->lmsg;
365 	struct rtentry *rt = lmsg->u.ms_resultp;
366 
367 	rtfree_oncpu(rt);
368 	lwkt_replymsg(lmsg, 0);
369 }
370 
371 void
372 rtfree_remote(struct rtentry *rt, int allow_panic)
373 {
374 	struct netmsg_base msg;
375 	struct lwkt_msg *lmsg;
376 
377 	KKASSERT(rt->rt_cpuid != mycpuid);
378 
379 	if (route_assert_owner_access && allow_panic) {
380 		panic("rt remote free rt_cpuid %d, mycpuid %d\n",
381 		      rt->rt_cpuid, mycpuid);
382 	} else {
383 		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
384 			rt->rt_cpuid, mycpuid);
385 		print_backtrace(-1);
386 	}
387 
388 	netmsg_init(&msg, NULL, &curthread->td_msgport,
389 		    0, rtfree_remote_dispatch);
390 	lmsg = &msg.lmsg;
391 	lmsg->u.ms_resultp = rt;
392 
393 	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
394 }
395 
396 static int
397 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
398 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
399 {
400 	struct rtentry *rt = NULL;
401 	struct rt_addrinfo rtinfo;
402 	struct ifaddr *ifa;
403 	u_long *stat = NULL;
404 	int error;
405 
406 	/* verify the gateway is directly reachable */
407 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
408 		error = ENETUNREACH;
409 		goto out;
410 	}
411 
412 	/*
413 	 * If the redirect isn't from our current router for this destination,
414 	 * it's either old or wrong.
415 	 */
416 	if (!(flags & RTF_DONE) &&		/* XXX JH */
417 	    (rt = rtpurelookup(dst)) != NULL &&
418 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
419 		error = EINVAL;
420 		goto done;
421 	}
422 
423 	/*
424 	 * If it redirects us to ourselves, we have a routing loop,
425 	 * perhaps as a result of an interface going down recently.
426 	 */
427 	if (ifa_ifwithaddr(gateway)) {
428 		error = EHOSTUNREACH;
429 		goto done;
430 	}
431 
432 	/*
433 	 * Create a new entry if the lookup failed or if we got back
434 	 * a wildcard entry for the default route.  This is necessary
435 	 * for hosts which use routing redirects generated by smart
436 	 * gateways to dynamically build the routing tables.
437 	 */
438 	if (rt == NULL)
439 		goto create;
440 	if (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2) {
441 		rtfree(rt);
442 		goto create;
443 	}
444 
445 	/* Ignore redirects for directly connected hosts. */
446 	if (!(rt->rt_flags & RTF_GATEWAY)) {
447 		error = EHOSTUNREACH;
448 		goto done;
449 	}
450 
451 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
452 		/*
453 		 * Changing from a network route to a host route.
454 		 * Create a new host route rather than smashing the
455 		 * network route.
456 		 */
457 create:
458 		flags |= RTF_GATEWAY | RTF_DYNAMIC;
459 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
460 		rtinfo.rti_info[RTAX_DST] = dst;
461 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
462 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
463 		rtinfo.rti_flags = flags;
464 		rtinfo.rti_ifa = ifa;
465 		rt = NULL;	/* copy-in/copy-out parameter */
466 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
467 		if (rt != NULL)
468 			flags = rt->rt_flags;
469 		stat = &rtstat.rts_dynamic;
470 	} else {
471 		/*
472 		 * Smash the current notion of the gateway to this destination.
473 		 * Should check about netmask!!!
474 		 */
475 		rt->rt_flags |= RTF_MODIFIED;
476 		flags |= RTF_MODIFIED;
477 
478 		/* We only need to report rtmsg on CPU0 */
479 		rt_setgate(rt, rt_key(rt), gateway,
480 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
481 		error = 0;
482 		stat = &rtstat.rts_newgateway;
483 	}
484 
485 done:
486 	if (rt != NULL)
487 		rtfree(rt);
488 out:
489 	if (error != 0)
490 		rtstat.rts_badredirect++;
491 	else if (stat != NULL)
492 		(*stat)++;
493 
494 	return error;
495 }
496 
497 #ifdef SMP
498 
499 struct netmsg_rtredirect {
500 	struct netmsg_base base;
501 	struct sockaddr *dst;
502 	struct sockaddr *gateway;
503 	struct sockaddr *netmask;
504 	int		flags;
505 	struct sockaddr *src;
506 };
507 
508 #endif
509 
510 /*
511  * Force a routing table entry to the specified
512  * destination to go through the given gateway.
513  * Normally called as a result of a routing redirect
514  * message from the network layer.
515  *
516  * N.B.: must be called at splnet
517  */
518 void
519 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
520 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
521 {
522 	struct rt_addrinfo rtinfo;
523 	int error;
524 #ifdef SMP
525 	struct netmsg_rtredirect msg;
526 
527 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
528 		    0, rtredirect_msghandler);
529 	msg.dst = dst;
530 	msg.gateway = gateway;
531 	msg.netmask = netmask;
532 	msg.flags = flags;
533 	msg.src = src;
534 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
535 #else
536 	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
537 #endif
538 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
539 	rtinfo.rti_info[RTAX_DST] = dst;
540 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
541 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
542 	rtinfo.rti_info[RTAX_AUTHOR] = src;
543 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
544 }
545 
546 #ifdef SMP
547 
548 static void
549 rtredirect_msghandler(netmsg_t msg)
550 {
551 	struct netmsg_rtredirect *rmsg = (void *)msg;
552 	int nextcpu;
553 
554 	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
555 			 rmsg->flags, rmsg->src);
556 	nextcpu = mycpuid + 1;
557 	if (nextcpu < ncpus)
558 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
559 	else
560 		lwkt_replymsg(&msg->lmsg, 0);
561 }
562 
563 #endif
564 
565 /*
566  * Routing table ioctl interface.
567  */
568 int
569 rtioctl(u_long req, caddr_t data, struct ucred *cred)
570 {
571 #ifdef INET
572 	/* Multicast goop, grrr... */
573 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
574 #else
575 	return ENXIO;
576 #endif
577 }
578 
579 struct ifaddr *
580 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
581 {
582 	struct ifaddr *ifa;
583 
584 	if (!(flags & RTF_GATEWAY)) {
585 		/*
586 		 * If we are adding a route to an interface,
587 		 * and the interface is a point-to-point link,
588 		 * we should search for the destination
589 		 * as our clue to the interface.  Otherwise
590 		 * we can use the local address.
591 		 */
592 		ifa = NULL;
593 		if (flags & RTF_HOST) {
594 			ifa = ifa_ifwithdstaddr(dst);
595 		}
596 		if (ifa == NULL)
597 			ifa = ifa_ifwithaddr(gateway);
598 	} else {
599 		/*
600 		 * If we are adding a route to a remote net
601 		 * or host, the gateway may still be on the
602 		 * other end of a pt to pt link.
603 		 */
604 		ifa = ifa_ifwithdstaddr(gateway);
605 	}
606 	if (ifa == NULL)
607 		ifa = ifa_ifwithnet(gateway);
608 	if (ifa == NULL) {
609 		struct rtentry *rt;
610 
611 		rt = rtpurelookup(gateway);
612 		if (rt == NULL)
613 			return (NULL);
614 		rt->rt_refcnt--;
615 		if ((ifa = rt->rt_ifa) == NULL)
616 			return (NULL);
617 	}
618 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
619 		struct ifaddr *oldifa = ifa;
620 
621 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
622 		if (ifa == NULL)
623 			ifa = oldifa;
624 	}
625 	return (ifa);
626 }
627 
628 static int rt_fixdelete (struct radix_node *, void *);
629 static int rt_fixchange (struct radix_node *, void *);
630 
631 struct rtfc_arg {
632 	struct rtentry *rt0;
633 	struct radix_node_head *rnh;
634 };
635 
636 /*
637  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
638  */
639 int
640 rt_getifa(struct rt_addrinfo *rtinfo)
641 {
642 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
643 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
644 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
645 	int flags = rtinfo->rti_flags;
646 
647 	/*
648 	 * ifp may be specified by sockaddr_dl
649 	 * when protocol address is ambiguous.
650 	 */
651 	if (rtinfo->rti_ifp == NULL) {
652 		struct sockaddr *ifpaddr;
653 
654 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
655 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
656 			struct ifaddr *ifa;
657 
658 			ifa = ifa_ifwithnet(ifpaddr);
659 			if (ifa != NULL)
660 				rtinfo->rti_ifp = ifa->ifa_ifp;
661 		}
662 	}
663 
664 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
665 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
666 	if (rtinfo->rti_ifa == NULL) {
667 		struct sockaddr *sa;
668 
669 		sa = ifaaddr != NULL ? ifaaddr :
670 		    (gateway != NULL ? gateway : dst);
671 		if (sa != NULL && rtinfo->rti_ifp != NULL)
672 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
673 		else if (dst != NULL && gateway != NULL)
674 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
675 		else if (sa != NULL)
676 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
677 	}
678 	if (rtinfo->rti_ifa == NULL)
679 		return (ENETUNREACH);
680 
681 	if (rtinfo->rti_ifp == NULL)
682 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
683 	return (0);
684 }
685 
686 /*
687  * Do appropriate manipulations of a routing tree given
688  * all the bits of info needed
689  */
690 int
691 rtrequest(
692 	int req,
693 	struct sockaddr *dst,
694 	struct sockaddr *gateway,
695 	struct sockaddr *netmask,
696 	int flags,
697 	struct rtentry **ret_nrt)
698 {
699 	struct rt_addrinfo rtinfo;
700 
701 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
702 	rtinfo.rti_info[RTAX_DST] = dst;
703 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
704 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
705 	rtinfo.rti_flags = flags;
706 	return rtrequest1(req, &rtinfo, ret_nrt);
707 }
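
/*
 * Example usage (editorial sketch, not part of the original source):
 * adding a static host route to 10.0.0.1 via gateway 192.0.2.1 on the
 * current cpu's table would look roughly like:
 *
 *	struct sockaddr_in dst, gw;
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	bzero(&dst, sizeof(dst));
 *	bzero(&gw, sizeof(gw));
 *	dst.sin_len = gw.sin_len = sizeof(struct sockaddr_in);
 *	dst.sin_family = gw.sin_family = AF_INET;
 *	dst.sin_addr.s_addr = htonl(0x0a000001);	(10.0.0.1)
 *	gw.sin_addr.s_addr = htonl(0xc0000201);		(192.0.2.1)
 *
 *	error = rtrequest(RTM_ADD, (struct sockaddr *)&dst,
 *			  (struct sockaddr *)&gw, NULL,
 *			  RTF_HOST | RTF_GATEWAY | RTF_STATIC, &rt);
 *	if (error == 0 && rt != NULL)
 *		rtfree(rt);	(drop the extra reference rtrequest returned)
 *
 * This only touches the calling cpu's table; rtrequest_global() /
 * rtrequest1_global() below replicate the change on every cpu.
 */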
708 
709 int
710 rtrequest_global(
711 	int req,
712 	struct sockaddr *dst,
713 	struct sockaddr *gateway,
714 	struct sockaddr *netmask,
715 	int flags)
716 {
717 	struct rt_addrinfo rtinfo;
718 
719 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
720 	rtinfo.rti_info[RTAX_DST] = dst;
721 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
722 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
723 	rtinfo.rti_flags = flags;
724 	return rtrequest1_global(req, &rtinfo, NULL, NULL);
725 }
726 
727 #ifdef SMP
728 
729 struct netmsg_rtq {
730 	struct netmsg_base	base;
731 	int			req;
732 	struct rt_addrinfo	*rtinfo;
733 	rtrequest1_callback_func_t callback;
734 	void			*arg;
735 };
736 
737 #endif
738 
739 int
740 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
741 		  rtrequest1_callback_func_t callback, void *arg)
742 {
743 	int error;
744 #ifdef SMP
745 	struct netmsg_rtq msg;
746 
747 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
748 		    0, rtrequest1_msghandler);
749 	msg.base.lmsg.ms_error = -1;
750 	msg.req = req;
751 	msg.rtinfo = rtinfo;
752 	msg.callback = callback;
753 	msg.arg = arg;
754 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
755 #else
756 	struct rtentry *rt = NULL;
757 
758 	error = rtrequest1(req, rtinfo, &rt);
759 	if (rt)
760 		--rt->rt_refcnt;
761 	if (callback)
762 		callback(req, error, rtinfo, rt, arg);
763 #endif
764 	return (error);
765 }
766 
767 /*
768  * Handle a route table request on the current cpu.  Since the route tables
769  * are supposed to be identical on each cpu, an error occurring later in the
770  * message chain is considered system-fatal.
771  */
772 #ifdef SMP
773 
774 static void
775 rtrequest1_msghandler(netmsg_t msg)
776 {
777 	struct netmsg_rtq *rmsg = (void *)msg;
778 	struct rt_addrinfo rtinfo;
779 	struct rtentry *rt = NULL;
780 	int nextcpu;
781 	int error;
782 
783 	/*
784 	 * Copy the rtinfo.  We need to make sure that the original
785 	 * rtinfo, which is set up by the caller and carried in the netmsg, will
786 	 * _not_ be changed; else the next CPU on the netmsg forwarding
787 	 * path will see a different rtinfo than what this CPU has seen.
788 	 */
789 	rtinfo = *rmsg->rtinfo;
790 
791 	error = rtrequest1(rmsg->req, &rtinfo, &rt);
792 	if (rt)
793 		--rt->rt_refcnt;
794 	if (rmsg->callback)
795 		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
796 
797 	/*
798 	 * RTM_DELETE's are propagated even if an error occurs, since a
799 	 * cloned route might be undergoing deletion and cloned routes
800 	 * are not necessarily replicated.  An overall error is returned
801 	 * only if no cpus have the route in question.
802 	 */
803 	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
804 		rmsg->base.lmsg.ms_error = error;
805 
806 	nextcpu = mycpuid + 1;
807 	if (error && rmsg->req != RTM_DELETE) {
808 		if (mycpuid != 0) {
809 			panic("rtrequest1_msghandler: rtrequest table "
810 			      "error was not on cpu #0");
811 		}
812 		lwkt_replymsg(&rmsg->base.lmsg, error);
813 	} else if (nextcpu < ncpus) {
814 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
815 	} else {
816 		lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
817 	}
818 }
819 
820 #endif
821 
822 int
823 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
824 {
825 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
826 	struct rtentry *rt;
827 	struct radix_node *rn;
828 	struct radix_node_head *rnh;
829 	struct ifaddr *ifa;
830 	struct sockaddr *ndst;
831 	boolean_t reportmsg;
832 	int error = 0;
833 
834 #define gotoerr(x) { error = x ; goto bad; }
835 
836 #ifdef ROUTE_DEBUG
837 	if (route_debug)
838 		rt_addrinfo_print(req, rtinfo);
839 #endif
840 
841 	crit_enter();
842 	/*
843 	 * Find the correct routing tree to use for this Address Family
844 	 */
845 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
846 		gotoerr(EAFNOSUPPORT);
847 
848 	/*
849 	 * If we are adding a host route then we don't want to put
850 	 * a netmask in the tree, nor do we want to clone it.
851 	 */
852 	if (rtinfo->rti_flags & RTF_HOST) {
853 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
854 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
855 	}
856 
857 	switch (req) {
858 	case RTM_DELETE:
859 		/* Remove the item from the tree. */
860 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
861 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
862 				      rnh);
863 		if (rn == NULL)
864 			gotoerr(ESRCH);
865 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
866 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
867 		rt = (struct rtentry *)rn;
868 
869 		/* ref to prevent a deletion race */
870 		++rt->rt_refcnt;
871 
872 		/* Free any routes cloned from this one. */
873 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
874 		    rt_mask(rt) != NULL) {
875 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
876 					       (char *)rt_mask(rt),
877 					       rt_fixdelete, rt);
878 		}
879 
880 		if (rt->rt_gwroute != NULL) {
881 			RTFREE(rt->rt_gwroute);
882 			rt->rt_gwroute = NULL;
883 		}
884 
885 		/*
886 		 * NB: RTF_UP must be set during the search above,
887 		 * because we might delete the last ref, causing
888 		 * rt to get freed prematurely.
889 		 */
890 		rt->rt_flags &= ~RTF_UP;
891 
892 #ifdef ROUTE_DEBUG
893 		if (route_debug)
894 			rt_print(rtinfo, rt);
895 #endif
896 
897 		/* Give the protocol a chance to keep things in sync. */
898 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
899 			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);
900 
901 		/*
902 		 * If the caller wants it, then it can have it,
903 		 * but it's up to it to free the rtentry as we won't be
904 		 * doing it.
905 		 */
906 		KASSERT(rt->rt_refcnt >= 0,
907 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
908 		if (ret_nrt != NULL) {
909 			/* leave ref intact for return */
910 			*ret_nrt = rt;
911 		} else {
912 			/* deref / attempt to destroy */
913 			rtfree(rt);
914 		}
915 		break;
916 
917 	case RTM_RESOLVE:
918 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
919 			gotoerr(EINVAL);
920 		ifa = rt->rt_ifa;
921 		rtinfo->rti_flags =
922 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
923 		rtinfo->rti_flags |= RTF_WASCLONED;
924 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
925 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
926 			rtinfo->rti_flags |= RTF_HOST;
927 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
928 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
929 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
930 		goto makeroute;
931 
932 	case RTM_ADD:
933 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
934 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
935 		    ("rtrequest: GATEWAY but no gateway"));
936 
937 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
938 			gotoerr(error);
939 		ifa = rtinfo->rti_ifa;
940 makeroute:
941 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
942 		if (rt == NULL)
943 			gotoerr(ENOBUFS);
944 		bzero(rt, sizeof(struct rtentry));
945 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
946 		rt->rt_cpuid = mycpuid;
947 
948 		if (mycpuid != 0 && req == RTM_ADD) {
949 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
950 			reportmsg = RTL_DONTREPORT;
951 		} else {
952 			/*
953 			 * For RTM_ADD, we only send rtmsg on CPU0.
954 			 * For RTM_RESOLVE, we always send rtmsg. XXX
955 			 */
956 			reportmsg = RTL_REPORTMSG;
957 		}
958 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
959 				   reportmsg);
960 		if (error != 0) {
961 			Free(rt);
962 			gotoerr(error);
963 		}
964 
965 		ndst = rt_key(rt);
966 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
967 			rt_maskedcopy(dst, ndst,
968 				      rtinfo->rti_info[RTAX_NETMASK]);
969 		else
970 			bcopy(dst, ndst, dst->sa_len);
971 
972 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
973 			rt_setshims(rt, rtinfo->rti_info);
974 
975 		/*
976 		 * Note that we now have a reference to the ifa.
977 		 * This moved from below so that rnh->rnh_addaddr() can
978 		 * examine the ifa and ifa->ifa_ifp if it so desires.
979 		 */
980 		IFAREF(ifa);
981 		rt->rt_ifa = ifa;
982 		rt->rt_ifp = ifa->ifa_ifp;
983 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
984 
985 		rn = rnh->rnh_addaddr((char *)ndst,
986 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
987 				      rnh, rt->rt_nodes);
988 		if (rn == NULL) {
989 			struct rtentry *oldrt;
990 
991 			/*
992 			 * We already have one of these in the tree.
993 			 * We do a special hack: if the old route was
994 			 * cloned, then we blow it away and try
995 			 * re-inserting the new one.
996 			 */
997 			oldrt = rtpurelookup(ndst);
998 			if (oldrt != NULL) {
999 				--oldrt->rt_refcnt;
1000 				if (oldrt->rt_flags & RTF_WASCLONED) {
1001 					rtrequest(RTM_DELETE, rt_key(oldrt),
1002 						  oldrt->rt_gateway,
1003 						  rt_mask(oldrt),
1004 						  oldrt->rt_flags, NULL);
1005 					rn = rnh->rnh_addaddr((char *)ndst,
1006 					    (char *)
1007 						rtinfo->rti_info[RTAX_NETMASK],
1008 					    rnh, rt->rt_nodes);
1009 				}
1010 			}
1011 		}
1012 
1013 		/*
1014 		 * If it still failed to go into the tree,
1015 		 * then un-make it (this should be a function).
1016 		 */
1017 		if (rn == NULL) {
1018 			if (rt->rt_gwroute != NULL)
1019 				rtfree(rt->rt_gwroute);
1020 			IFAFREE(ifa);
1021 			Free(rt_key(rt));
1022 			Free(rt);
1023 			gotoerr(EEXIST);
1024 		}
1025 
1026 		/*
1027 		 * If we got here from RESOLVE, then we are cloning
1028 		 * so clone the rest, and note that we
1029 		 * are a clone (and increment the parent's references)
1030 		 */
1031 		if (req == RTM_RESOLVE) {
1032 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
1033 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
1034 			if ((*ret_nrt)->rt_flags &
1035 				       (RTF_CLONING | RTF_PRCLONING)) {
1036 				rt->rt_parent = *ret_nrt;
1037 				(*ret_nrt)->rt_refcnt++;
1038 			}
1039 		}
1040 
1041 		/*
1042 		 * if this protocol has something to add to this then
1043 		 * allow it to do that as well.
1044 		 */
1045 		if (ifa->ifa_rtrequest != NULL)
1046 			ifa->ifa_rtrequest(req, rt, rtinfo);
1047 
1048 		/*
1049 		 * We repeat the same procedure from rt_setgate() here because
1050 		 * it doesn't fire when we call it there because the node
1051 		 * hasn't been added to the tree yet.
1052 		 */
1053 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1054 		    rt_mask(rt) != NULL) {
1055 			struct rtfc_arg arg = { rt, rnh };
1056 
1057 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1058 					       (char *)rt_mask(rt),
1059 					       rt_fixchange, &arg);
1060 		}
1061 
1062 #ifdef ROUTE_DEBUG
1063 		if (route_debug)
1064 			rt_print(rtinfo, rt);
1065 #endif
1066 		/*
1067 		 * Return the resulting rtentry,
1068 		 * increasing the number of references by one.
1069 		 */
1070 		if (ret_nrt != NULL) {
1071 			rt->rt_refcnt++;
1072 			*ret_nrt = rt;
1073 		}
1074 		break;
1075 	default:
1076 		error = EOPNOTSUPP;
1077 	}
1078 bad:
1079 #ifdef ROUTE_DEBUG
1080 	if (route_debug) {
1081 		if (error)
1082 			kprintf("rti %p failed error %d\n", rtinfo, error);
1083 		else
1084 			kprintf("rti %p succeeded\n", rtinfo);
1085 	}
1086 #endif
1087 	crit_exit();
1088 	return (error);
1089 }
1090 
1091 /*
1092  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1093  * (i.e., the routes related to it by the operation of cloning).  This
1094  * routine is iterated over all potential former-child-routes by way of
1095  * rnh->rnh_walktree_from() above, and those that actually are children of
1096  * the late parent (passed in as VP here) are themselves deleted.
1097  */
1098 static int
1099 rt_fixdelete(struct radix_node *rn, void *vp)
1100 {
1101 	struct rtentry *rt = (struct rtentry *)rn;
1102 	struct rtentry *rt0 = vp;
1103 
1104 	if (rt->rt_parent == rt0 &&
1105 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1106 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1107 				 rt->rt_flags, NULL);
1108 	}
1109 	return 0;
1110 }
1111 
1112 /*
1113  * This routine is called from rt_setgate() to do the analogous thing for
1114  * adds and changes.  There is the added complication in this case of a
1115  * middle insert; i.e., insertion of a new network route between an older
1116  * network route and (cloned) host routes.  For this reason, a simple check
1117  * of rt->rt_parent is insufficient; each candidate route must be tested
1118  * against the (mask, value) of the new route (passed as before in vp)
1119  * to see if the new route matches it.
1120  *
1121  * XXX - it may be possible to do fixdelete() for changes and reserve this
1122  * routine just for adds.  I'm not sure why I thought it was necessary to do
1123  * changes this way.
1124  */
1125 #ifdef DEBUG
1126 static int rtfcdebug = 0;
1127 #endif
1128 
1129 static int
1130 rt_fixchange(struct radix_node *rn, void *vp)
1131 {
1132 	struct rtentry *rt = (struct rtentry *)rn;
1133 	struct rtfc_arg *ap = vp;
1134 	struct rtentry *rt0 = ap->rt0;
1135 	struct radix_node_head *rnh = ap->rnh;
1136 	u_char *xk1, *xm1, *xk2, *xmp;
1137 	int i, len, mlen;
1138 
1139 #ifdef DEBUG
1140 	if (rtfcdebug)
1141 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1142 #endif
1143 
1144 	if (rt->rt_parent == NULL ||
1145 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1146 #ifdef DEBUG
1147 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1148 #endif
1149 		return 0;
1150 	}
1151 
1152 	if (rt->rt_parent == rt0) {
1153 #ifdef DEBUG
1154 		if (rtfcdebug) kprintf("parent match\n");
1155 #endif
1156 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1157 				 rt->rt_flags, NULL);
1158 	}
1159 
1160 	/*
1161 	 * There probably is a function somewhere which does this...
1162 	 * if not, there should be.
1163 	 */
1164 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1165 
1166 	xk1 = (u_char *)rt_key(rt0);
1167 	xm1 = (u_char *)rt_mask(rt0);
1168 	xk2 = (u_char *)rt_key(rt);
1169 
1170 	/* avoid applying a less specific route */
1171 	xmp = (u_char *)rt_mask(rt->rt_parent);
1172 	mlen = rt_key(rt->rt_parent)->sa_len;
1173 	if (mlen > rt_key(rt0)->sa_len) {
1174 #ifdef DEBUG
1175 		if (rtfcdebug)
1176 			kprintf("rt_fixchange: inserting a less "
1177 			       "specific route\n");
1178 #endif
1179 		return 0;
1180 	}
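	/*
	 * Editorial note: the test below,
	 *	(xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]
	 * is equivalent to (xmp[i] & xm1[i]) != xmp[i]; it fires when the
	 * parent's mask has a bit set that the new route's mask lacks, i.e.
	 * the route being added is less specific than this clone's parent,
	 * so the clone must not be stolen from it.
	 */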
1181 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1182 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1183 #ifdef DEBUG
1184 			if (rtfcdebug)
1185 				kprintf("rt_fixchange: inserting a less "
1186 				       "specific route\n");
1187 #endif
1188 			return 0;
1189 		}
1190 	}
1191 
1192 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1193 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1194 #ifdef DEBUG
1195 			if (rtfcdebug) kprintf("no match\n");
1196 #endif
1197 			return 0;
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * OK, this node is a clone, and matches the node currently being
1203 	 * changed/added under the node's mask.  So, get rid of it.
1204 	 */
1205 #ifdef DEBUG
1206 	if (rtfcdebug) kprintf("deleting\n");
1207 #endif
1208 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1209 			 rt->rt_flags, NULL);
1210 }
1211 
1212 #define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
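
/*
 * Editorial note: ROUNDUP() pads a sockaddr length up to the next multiple
 * of sizeof(long); with 8-byte longs, ROUNDUP(5) == 8, ROUNDUP(8) == 8,
 * ROUNDUP(16) == 16, and ROUNDUP(0) == 8 (a zero-length address still
 * consumes one long of space).
 */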
1213 
1214 int
1215 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1216 	   boolean_t generate_report)
1217 {
1218 	char *space, *oldspace;
1219 	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
1220 	struct rtentry *rt = rt0;
1221 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1222 
1223 	/*
1224 	 * A host route with the destination equal to the gateway
1225 	 * will interfere with keeping LLINFO in the routing
1226 	 * table, so disallow it.
1227 	 */
1228 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1229 			      (RTF_HOST | RTF_GATEWAY)) &&
1230 	    dst->sa_len == gate->sa_len &&
1231 	    sa_equal(dst, gate)) {
1232 		/*
1233 		 * The route might already exist if this is an RTM_CHANGE
1234 		 * or a routing redirect, so try to delete it.
1235 		 */
1236 		if (rt_key(rt0) != NULL)
1237 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1238 				  rt_mask(rt0), rt0->rt_flags, NULL);
1239 		return EADDRNOTAVAIL;
1240 	}
1241 
1242 	/*
1243 	 * Both dst and gateway are stored in the same malloc'ed chunk
1244 	 * (If I ever get my hands on....)
1245 	 * if we need to malloc a new chunk, then keep the old one around
1246 	 * till we don't need it any more.
1247 	 */
1248 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1249 		oldspace = (char *)rt_key(rt);
1250 		R_Malloc(space, char *, dlen + glen);
1251 		if (space == NULL)
1252 			return ENOBUFS;
1253 		rt->rt_nodes->rn_key = space;
1254 	} else {
1255 		space = (char *)rt_key(rt);	/* Just use the old space. */
1256 		oldspace = NULL;
1257 	}
1258 
1259 	/* Set the gateway value. */
1260 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1261 	bcopy(gate, rt->rt_gateway, glen);
1262 
1263 	if (oldspace != NULL) {
1264 		/*
1265 		 * If we allocated a new chunk, preserve the original dst.
1266 		 * This way, rt_setgate() really just sets the gate
1267 		 * and leaves the dst field alone.
1268 		 */
1269 		bcopy(dst, space, dlen);
1270 		Free(oldspace);
1271 	}
1272 
1273 	/*
1274 	 * If there is already a gwroute, it's now almost definitely wrong
1275 	 * so drop it.
1276 	 */
1277 	if (rt->rt_gwroute != NULL) {
1278 		RTFREE(rt->rt_gwroute);
1279 		rt->rt_gwroute = NULL;
1280 	}
1281 	if (rt->rt_flags & RTF_GATEWAY) {
1282 		/*
1283 		 * Cloning loop avoidance: In the presence of
1284 		 * protocol-cloning and bad configuration, it is
1285 		 * possible to get stuck in bottomless mutual recursion
1286 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1287 		 * by not allowing protocol-cloning to operate for
1288 		 * gateways (which is probably the correct choice
1289 		 * anyway), and avoid the resulting reference loops
1290 		 * by disallowing any route to run through itself as
1291 		 * a gateway.  This is obviously mandatory when we
1292 		 * get rt->rt_output().
1293 		 *
1294 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1295 		 */
1296 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1297 					   RTF_PRCLONING);
1298 		if (rt->rt_gwroute == rt) {
1299 			rt->rt_gwroute = NULL;
1300 			--rt->rt_refcnt;
1301 			return EDQUOT; /* failure */
1302 		}
1303 	}
1304 
1305 	/*
1306 	 * This isn't going to do anything useful for host routes, so
1307 	 * don't bother.  Also make sure we have a reasonable mask
1308 	 * (we don't yet have one during adds).
1309 	 */
1310 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1311 		struct rtfc_arg arg = { rt, rnh };
1312 
1313 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1314 				       (char *)rt_mask(rt),
1315 				       rt_fixchange, &arg);
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static void
1322 rt_maskedcopy(
1323 	struct sockaddr *src,
1324 	struct sockaddr *dst,
1325 	struct sockaddr *netmask)
1326 {
1327 	u_char *cp1 = (u_char *)src;
1328 	u_char *cp2 = (u_char *)dst;
1329 	u_char *cp3 = (u_char *)netmask;
1330 	u_char *cplim = cp2 + *cp3;
1331 	u_char *cplim2 = cp2 + *cp1;
1332 
1333 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1334 	cp3 += 2;
1335 	if (cplim > cplim2)
1336 		cplim = cplim2;
1337 	while (cp2 < cplim)
1338 		*cp2++ = *cp1++ & *cp3++;
1339 	if (cp2 < cplim2)
1340 		bzero(cp2, cplim2 - cp2);
1341 }
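
/*
 * Example (editorial, not part of the original source): for an AF_INET
 * src of 192.168.1.42 and a 255.255.255.0 netmask, rt_maskedcopy() copies
 * sa_len and sa_family verbatim, ANDs the remaining bytes covered by the
 * mask to yield 192.168.1.0, and zeroes the rest of dst.  This is how
 * rtrequest1() masks the destination it stores as the route key and how
 * rtinit() builds the masked destination for a delete lookup.
 */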
1342 
1343 int
1344 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1345 {
1346 	struct rtentry *up_rt, *rt;
1347 
1348 	if (!(rt0->rt_flags & RTF_UP)) {
1349 		up_rt = rtlookup(dst);
1350 		if (up_rt == NULL)
1351 			return (EHOSTUNREACH);
1352 		up_rt->rt_refcnt--;
1353 	} else
1354 		up_rt = rt0;
1355 	if (up_rt->rt_flags & RTF_GATEWAY) {
1356 		if (up_rt->rt_gwroute == NULL) {
1357 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1358 			if (up_rt->rt_gwroute == NULL)
1359 				return (EHOSTUNREACH);
1360 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1361 			rtfree(up_rt->rt_gwroute);
1362 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1363 			if (up_rt->rt_gwroute == NULL)
1364 				return (EHOSTUNREACH);
1365 		}
1366 		rt = up_rt->rt_gwroute;
1367 	} else
1368 		rt = up_rt;
1369 	if (rt->rt_flags & RTF_REJECT &&
1370 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1371 	     time_second < rt->rt_rmx.rmx_expire))	/* rt not expired */
1372 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1373 	*drt = rt;
1374 	return 0;
1375 }
1376 
1377 static int
1378 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
1379 	int i;
1380 
1381 	for (i = 0; i < 3; i++) {
1382 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1383 		int shimlen;
1384 
1385 		if (shim == NULL)
1386 			break;
1387 
1388 		shimlen = ROUNDUP(shim->sa_len);
1389 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1390 		bcopy(shim, rt->rt_shim[i], shimlen);
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 #ifdef ROUTE_DEBUG
1397 
1398 /*
1399  * Print out a route table entry
1400  */
1401 void
1402 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1403 {
1404 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1405 		rtinfo, mycpuid, rn, rn->rt_flags);
1406 	sockaddr_print(rt_key(rn));
1407 	kprintf(" mask ");
1408 	sockaddr_print(rt_mask(rn));
1409 	kprintf(" gw ");
1410 	sockaddr_print(rn->rt_gateway);
1411 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1412 	kprintf(" ifa %p\n", rn->rt_ifa);
1413 }
1414 
1415 void
1416 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1417 {
1418 	int didit = 0;
1419 	int i;
1420 
1421 #ifdef ROUTE_DEBUG
1422 	if (cmd == RTM_DELETE && route_debug > 1)
1423 		print_backtrace(-1);
1424 #endif
1425 
1426 	switch(cmd) {
1427 	case RTM_ADD:
1428 		kprintf("ADD ");
1429 		break;
1430 	case RTM_RESOLVE:
1431 		kprintf("RES ");
1432 		break;
1433 	case RTM_DELETE:
1434 		kprintf("DEL ");
1435 		break;
1436 	default:
1437 		kprintf("C%02d ", cmd);
1438 		break;
1439 	}
1440 	kprintf("rti %p cpu %d ", rti, mycpuid);
1441 	for (i = 0; i < rti->rti_addrs; ++i) {
1442 		if (rti->rti_info[i] == NULL)
1443 			continue;
1444 		if (didit)
1445 			kprintf(" ,");
1446 		switch(i) {
1447 		case RTAX_DST:
1448 			kprintf("(DST ");
1449 			break;
1450 		case RTAX_GATEWAY:
1451 			kprintf("(GWY ");
1452 			break;
1453 		case RTAX_NETMASK:
1454 			kprintf("(MSK ");
1455 			break;
1456 		case RTAX_GENMASK:
1457 			kprintf("(GEN ");
1458 			break;
1459 		case RTAX_IFP:
1460 			kprintf("(IFP ");
1461 			break;
1462 		case RTAX_IFA:
1463 			kprintf("(IFA ");
1464 			break;
1465 		case RTAX_AUTHOR:
1466 			kprintf("(AUT ");
1467 			break;
1468 		case RTAX_BRD:
1469 			kprintf("(BRD ");
1470 			break;
1471 		default:
1472 			kprintf("(?%02d ", i);
1473 			break;
1474 		}
1475 		sockaddr_print(rti->rti_info[i]);
1476 		kprintf(")");
1477 		didit = 1;
1478 	}
1479 	kprintf("\n");
1480 }
1481 
1482 void
1483 sockaddr_print(struct sockaddr *sa)
1484 {
1485 	struct sockaddr_in *sa4;
1486 	struct sockaddr_in6 *sa6;
1487 	int len;
1488 	int i;
1489 
1490 	if (sa == NULL) {
1491 		kprintf("NULL");
1492 		return;
1493 	}
1494 
1495 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1496 
1497 	switch(sa->sa_family) {
1498 	case AF_INET:
1499 	case AF_INET6:
1500 	default:
1501 		switch(sa->sa_family) {
1502 		case AF_INET:
1503 			sa4 = (struct sockaddr_in *)sa;
1504 			kprintf("INET %d %d.%d.%d.%d",
1505 				ntohs(sa4->sin_port),
1506 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1507 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1508 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1509 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1510 			);
1511 			break;
1512 		case AF_INET6:
1513 			sa6 = (struct sockaddr_in6 *)sa;
1514 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1515 				ntohs(sa6->sin6_port),
1516 				sa6->sin6_addr.s6_addr16[0],
1517 				sa6->sin6_addr.s6_addr16[1],
1518 				sa6->sin6_addr.s6_addr16[2],
1519 				sa6->sin6_addr.s6_addr16[3],
1520 				sa6->sin6_addr.s6_addr16[4],
1521 				sa6->sin6_addr.s6_addr16[5],
1522 				sa6->sin6_addr.s6_addr16[6],
1523 				sa6->sin6_addr.s6_addr16[7]
1524 			);
1525 			break;
1526 		default:
1527 			kprintf("AF%d ", sa->sa_family);
1528 			while (len > 0 && sa->sa_data[len-1] == 0)
1529 				--len;
1530 
1531 			for (i = 0; i < len; ++i) {
1532 				if (i)
1533 					kprintf(".");
1534 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1535 			}
1536 			break;
1537 		}
1538 	}
1539 }
1540 
1541 #endif
1542 
1543 /*
1544  * Set up a routing table entry, normally for an interface.
1545  */
1546 int
1547 rtinit(struct ifaddr *ifa, int cmd, int flags)
1548 {
1549 	struct sockaddr *dst, *deldst, *netmask;
1550 	struct mbuf *m = NULL;
1551 	struct radix_node_head *rnh;
1552 	struct radix_node *rn;
1553 	struct rt_addrinfo rtinfo;
1554 	int error;
1555 
1556 	if (flags & RTF_HOST) {
1557 		dst = ifa->ifa_dstaddr;
1558 		netmask = NULL;
1559 	} else {
1560 		dst = ifa->ifa_addr;
1561 		netmask = ifa->ifa_netmask;
1562 	}
1563 	/*
1564 	 * If it's a delete, check that if it exists, it's on the correct
1565 	 * interface or we might scrub a route to another ifa which would
1566 	 * be confusing at best and possibly worse.
1567 	 */
1568 	if (cmd == RTM_DELETE) {
1569 		/*
1570 		 * It's a delete, so it should already exist..
1571 		 * If it's a net, mask off the host bits
1572 		 * (Assuming we have a mask)
1573 		 */
1574 		if (netmask != NULL) {
1575 			m = m_get(MB_DONTWAIT, MT_SONAME);
1576 			if (m == NULL)
1577 				return (ENOBUFS);
1578 			mbuftrackid(m, 34);
1579 			deldst = mtod(m, struct sockaddr *);
1580 			rt_maskedcopy(dst, deldst, netmask);
1581 			dst = deldst;
1582 		}
1583 		/*
1584 		 * Look up an rtentry that is in the routing tree and
1585 		 * contains the correct info.
1586 		 */
1587 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1588 		    (rn = rnh->rnh_lookup((char *)dst,
1589 					  (char *)netmask, rnh)) == NULL ||
1590 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1591 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1592 			if (m != NULL)
1593 				m_free(m);
1594 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1595 		}
1596 		/* XXX */
1597 #if 0
1598 		else {
1599 			/*
1600 			 * One would think that as we are deleting, and we know
1601 			 * it doesn't exist, we could just return at this point
1602 			 * with an "ELSE" clause, but apparently not..
1603 			 */
1604 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1605 		}
1606 #endif
1607 	}
1608 	/*
1609 	 * Do the actual request
1610 	 */
1611 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1612 	rtinfo.rti_info[RTAX_DST] = dst;
1613 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1614 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1615 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1616 	rtinfo.rti_ifa = ifa;
1617 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
1618 	if (m != NULL)
1619 		m_free(m);
1620 	return (error);
1621 }
1622 
1623 static void
1624 rtinit_rtrequest_callback(int cmd, int error,
1625 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1626 			  void *arg)
1627 {
1628 	struct ifaddr *ifa = arg;
1629 
1630 	if (error == 0 && rt) {
1631 		if (mycpuid == 0) {
1632 			++rt->rt_refcnt;
1633 			rt_newaddrmsg(cmd, ifa, error, rt);
1634 			--rt->rt_refcnt;
1635 		}
1636 		if (cmd == RTM_DELETE) {
1637 			if (rt->rt_refcnt == 0) {
1638 				++rt->rt_refcnt;
1639 				rtfree(rt);
1640 			}
1641 		}
1642 	}
1643 }
1644 
1645 struct netmsg_rts {
1646 	struct netmsg_base	base;
1647 	int			req;
1648 	struct rt_addrinfo	*rtinfo;
1649 	rtsearch_callback_func_t callback;
1650 	void			*arg;
1651 	boolean_t		exact_match;
1652 	int			found_cnt;
1653 };
1654 
1655 int
1656 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1657 		rtsearch_callback_func_t callback, void *arg,
1658 		boolean_t exact_match)
1659 {
1660 	struct netmsg_rts msg;
1661 
1662 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1663 		    0, rtsearch_msghandler);
1664 	msg.req = req;
1665 	msg.rtinfo = rtinfo;
1666 	msg.callback = callback;
1667 	msg.arg = arg;
1668 	msg.exact_match = exact_match;
1669 	msg.found_cnt = 0;
1670 	return lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
1671 }
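
/*
 * Editorial sketch (derived from rtsearch_msghandler() below, not part of
 * the original source): the callback runs once per cpu on which a matching
 * route is found, with the rtentry temporarily referenced.  Its return
 * value steers the chain: 0 continues the search on the next cpu,
 * EJUSTRETURN stops the chain and reports success, and any other errno
 * aborts the request.  A minimal callback might look like:
 *
 *	static int
 *	example_callback(int req, struct rt_addrinfo *rtinfo,
 *			 struct rtentry *rt, void *arg, int found_cnt)
 *	{
 *		... inspect or modify rt on this cpu ...
 *		return 0;	(keep walking the remaining cpus)
 *	}
 *
 * "example_callback" is a placeholder name, not an existing function.
 */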
1672 
1673 static void
1674 rtsearch_msghandler(netmsg_t msg)
1675 {
1676 	struct netmsg_rts *rmsg = (void *)msg;
1677 	struct rt_addrinfo rtinfo;
1678 	struct radix_node_head *rnh;
1679 	struct rtentry *rt;
1680 	int nextcpu, error;
1681 
1682 	/*
1683 	 * Copy the rtinfo.  We need to make sure that the original
1684 	 * rtinfo, which is set up by the caller and carried in the netmsg, will
1685 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1686 	 * path will see a different rtinfo than what this CPU has seen.
1687 	 */
1688 	rtinfo = *rmsg->rtinfo;
1689 
1690 	/*
1691 	 * Find the correct routing tree to use for this Address Family
1692 	 */
1693 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1694 		if (mycpuid != 0)
1695 			panic("partially initialized routing tables\n");
1696 		lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1697 		return;
1698 	}
1699 
1700 	/*
1701 	 * Correct rtinfo for the host route searching.
1702 	 */
1703 	if (rtinfo.rti_flags & RTF_HOST) {
1704 		rtinfo.rti_netmask = NULL;
1705 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1706 	}
1707 
1708 	rt = (struct rtentry *)
1709 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1710 			     (char *)rtinfo.rti_netmask, rnh);
1711 
1712 	/*
1713 	 * If we are asked to do the "exact match", we need to make sure
1714 	 * that host route searching got a host route while network
1715 	 * route searching got a network route.
1716 	 */
1717 	if (rt != NULL && rmsg->exact_match &&
1718 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1719 		rt = NULL;
1720 
1721 	if (rt == NULL) {
1722 		/*
1723 		 * No matching routes have been found; don't count this
1724 		 * as a critical error (here, we set 'error' to 0), just
1725 		 * keep moving on, since at least prcloned routes are not
1726 		 * duplicated onto each CPU.
1727 		 */
1728 		error = 0;
1729 	} else {
1730 		rmsg->found_cnt++;
1731 
1732 		rt->rt_refcnt++;
1733 		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1734 				      rmsg->found_cnt);
1735 		rt->rt_refcnt--;
1736 
1737 		if (error == EJUSTRETURN) {
1738 			lwkt_replymsg(&rmsg->base.lmsg, 0);
1739 			return;
1740 		}
1741 	}
1742 
1743 	nextcpu = mycpuid + 1;
1744 	if (error) {
1745 		KKASSERT(rmsg->found_cnt > 0);
1746 
1747 		/*
1748 		 * In the following cases, an unrecoverable error has
1749 		 * not occurred:
1750 		 * o  Request is RTM_GET
1751 		 * o  The first time that we find the route, but the
1752 		 *    modification fails.
1753 		 */
1754 		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1755 			panic("rtsearch_msghandler: unrecoverable error "
1756 			      "cpu %d", mycpuid);
1757 		}
1758 		lwkt_replymsg(&rmsg->base.lmsg, error);
1759 	} else if (nextcpu < ncpus) {
1760 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
1761 	} else {
1762 		if (rmsg->found_cnt == 0) {
1763 			/* The requested route was never seen ... */
1764 			error = ESRCH;
1765 		}
1766 		lwkt_replymsg(&rmsg->base.lmsg, error);
1767 	}
1768 }
1769 
1770 int
1771 rtmask_add_global(struct sockaddr *mask)
1772 {
1773 	struct netmsg_base msg;
1774 
1775 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1776 		    0, rtmask_add_msghandler);
1777 	msg.lmsg.u.ms_resultp = mask;
1778 
1779 	return lwkt_domsg(rtable_portfn(0), &msg.lmsg, 0);
1780 }
1781 
1782 struct sockaddr *
1783 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1784 {
1785 	struct radix_node *n;
1786 
1787 #define	clen(s)	(*(u_char *)(s))
1788 	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1789 	if (n != NULL &&
1790 	    mask->sa_len >= clen(n->rn_key) &&
1791 	    bcmp((char *)mask + 1,
1792 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1793 		return (struct sockaddr *)n->rn_key;
1794 	} else {
1795 		return NULL;
1796 	}
1797 #undef clen
1798 }
1799 
1800 static void
1801 rtmask_add_msghandler(netmsg_t msg)
1802 {
1803 	struct lwkt_msg *lmsg = &msg->lmsg;
1804 	struct sockaddr *mask = lmsg->u.ms_resultp;
1805 	int error = 0, nextcpu;
1806 
1807 	if (rtmask_lookup(mask) == NULL)
1808 		error = ENOBUFS;
1809 
1810 	nextcpu = mycpuid + 1;
1811 	if (!error && nextcpu < ncpus)
1812 		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
1813 	else
1814 		lwkt_replymsg(lmsg, error);
1815 }
1816 
1817 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1818 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1819