xref: /dragonfly/sys/net/route.c (revision a68e0df0)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)route.c	8.3 (Berkeley) 1/9/95
66  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
67  * $DragonFly: src/sys/net/route.c,v 1.41 2008/11/09 10:50:15 sephe Exp $
68  */
69 
70 #include "opt_inet.h"
71 #include "opt_mpls.h"
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/domain.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/globaldata.h>
82 #include <sys/thread.h>
83 
84 #include <net/if.h>
85 #include <net/route.h>
86 #include <net/netisr.h>
87 
88 #include <netinet/in.h>
89 #include <net/ip_mroute/ip_mroute.h>
90 
91 #include <sys/thread2.h>
92 #include <sys/msgport2.h>
93 #include <net/netmsg2.h>
94 
95 #ifdef MPLS
96 #include <netproto/mpls/mpls.h>
97 #endif
98 
99 static struct rtstatistics rtstatistics_percpu[MAXCPU];
100 #ifdef SMP
101 #define rtstat	rtstatistics_percpu[mycpuid]
102 #else
103 #define rtstat	rtstatistics_percpu[0]
104 #endif
105 
106 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
107 struct lwkt_port *rt_ports[MAXCPU];
108 
109 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
110 			       struct sockaddr *);
111 static void rtable_init(void);
112 static void rtable_service_loop(void *dummy);
113 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
114 				      struct rtentry *, void *);
115 
116 #ifdef SMP
117 static void rtredirect_msghandler(struct netmsg *netmsg);
118 static void rtrequest1_msghandler(struct netmsg *netmsg);
119 #endif
120 static void rtsearch_msghandler(struct netmsg *netmsg);
121 
122 static void rtmask_add_msghandler(struct netmsg *netmsg);
123 
124 static int rt_setshims(struct rtentry *, struct sockaddr **);
125 
126 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
127 
128 #ifdef ROUTE_DEBUG
129 static int route_debug = 1;
130 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
131            &route_debug, 0, "");
132 #endif
133 
134 int route_assert_owner_access = 0;
135 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
136            &route_assert_owner_access, 0, "");
137 
138 /*
139  * Initialize the route table(s) for protocol domains and
140  * create a helper thread on each cpu which will be responsible for
141  * updating that cpu's route table entries.
142  */
143 void
144 route_init(void)
145 {
146 	int cpu;
147 	thread_t rtd;
148 
149 	for (cpu = 0; cpu < ncpus; ++cpu)
150 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
151 	rn_init();      /* initialize all zeroes, all ones, mask table */
152 	rtable_init();	/* call dom_rtattach() on each cpu */
153 
154 	for (cpu = 0; cpu < ncpus; cpu++) {
155 		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
156 			    0, cpu, "rtable_cpu %d", cpu);
157 		rt_ports[cpu] = &rtd->td_msgport;
158 	}
159 }
160 
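/*
 * Attach the per-domain route tables on this cpu by calling each
 * domain's dom_rtattach(), then forward the message to the next cpu.
 */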
161 static void
162 rtable_init_oncpu(struct netmsg *nmsg)
163 {
164 	struct domain *dom;
165 	int cpu = mycpuid;
166 
167 	SLIST_FOREACH(dom, &domains, dom_next) {
168 		if (dom->dom_rtattach) {
169 			dom->dom_rtattach(
170 				(void **)&rt_tables[cpu][dom->dom_family],
171 			        dom->dom_rtoffset);
172 		}
173 	}
174 	ifnet_forwardmsg(&nmsg->nm_lmsg, cpu + 1);
175 }
176 
177 static void
178 rtable_init(void)
179 {
180 	struct netmsg nmsg;
181 
182 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
183 		    0, rtable_init_oncpu);
184 	ifnet_domsg(&nmsg.nm_lmsg, 0);
185 }
186 
187 /*
188  * Our per-cpu table management protocol thread.  All route table operations
189  * are sequentially chained through all cpus starting at cpu #0 in order to
190  * maintain duplicate route tables on each cpu.  Having a separate route
191  * table management thread allows the protocol and interrupt threads to
192  * issue route table changes.
193  */
194 static void
195 rtable_service_loop(void *dummy __unused)
196 {
197 	struct netmsg *netmsg;
198 	thread_t td = curthread;
199 
200 	while ((netmsg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
201 		netmsg->nm_dispatch(netmsg);
202 	}
203 }
204 
205 /*
206  * Routing statistics.
207  */
208 #ifdef SMP
209 static int
210 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
211 {
212 	int cpu, error = 0;
213 
214 	for (cpu = 0; cpu < ncpus; ++cpu) {
215 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
216 					sizeof(struct rtstatistics))))
217 				break;
218 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
219 					sizeof(struct rtstatistics))))
220 				break;
221 	}
222 
223 	return (error);
224 }
225 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
226 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
227 #else
228 SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
229 "Routing statistics");
230 #endif
231 
232 /*
233  * Packet routing routines.
234  */
235 
236 /*
237  * Look up and fill in the "ro_rt" rtentry field in a route structure given
238  * an address in the "ro_dst" field.  Always send a report on a miss and
239  * always clone routes.
240  */
241 void
242 rtalloc(struct route *ro)
243 {
244 	rtalloc_ign(ro, 0UL);
245 }
246 
247 /*
248  * Look up and fill in the "ro_rt" rtentry field in a route structure given
249  * an address in the "ro_dst" field.  Always send a report on a miss and
250  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
251  * ignored.
252  */
253 void
254 rtalloc_ign(struct route *ro, u_long ignoreflags)
255 {
256 	if (ro->ro_rt != NULL) {
257 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
258 			return;
259 		rtfree(ro->ro_rt);
260 		ro->ro_rt = NULL;
261 	}
262 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
263 }
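
/*
 * A minimal usage sketch (assuming a caller-owned struct route 'ro'
 * whose ro_dst has been filled in and whose ro_rt starts out NULL):
 *
 *	rtalloc_ign(&ro, RTF_PRCLONING);
 *	if (ro.ro_rt == NULL)
 *		return (EHOSTUNREACH);
 *
 * The route returned in ro_rt is referenced and must eventually be
 * released with rtfree().
 */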
264 
265 /*
266  * Look up the route that matches the given "dst" address.
267  *
268  * Route lookup can have the side-effect of creating and returning
269  * a cloned route instead when "dst" matches a cloning route and the
270  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
271  *
272  * Any route returned has its reference count incremented.
273  */
274 struct rtentry *
275 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
276 {
277 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
278 	struct rtentry *rt;
279 
280 	if (rnh == NULL)
281 		goto unreach;
282 
283 	/*
284 	 * Look up route in the radix tree.
285 	 */
286 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
287 	if (rt == NULL)
288 		goto unreach;
289 
290 	/*
291 	 * Handle cloning routes.
292 	 */
293 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
294 		struct rtentry *clonedroute;
295 		int error;
296 
297 		clonedroute = rt;	/* copy in/copy out parameter */
298 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
299 				  &clonedroute);	/* clone the route */
300 		if (error != 0) {	/* cloning failed */
301 			if (generate_report)
302 				rt_dstmsg(RTM_MISS, dst, error);
303 			rt->rt_refcnt++;
304 			return (rt);	/* return the uncloned route */
305 		}
306 		if (generate_report) {
307 			if (clonedroute->rt_flags & RTF_XRESOLVE)
308 				rt_dstmsg(RTM_RESOLVE, dst, 0);
309 			else
310 				rt_rtmsg(RTM_ADD, clonedroute,
311 					 clonedroute->rt_ifp, 0);
312 		}
313 		return (clonedroute);	/* return cloned route */
314 	}
315 
316 	/*
317 	 * Increment the reference count of the matched route and return.
318 	 */
319 	rt->rt_refcnt++;
320 	return (rt);
321 
322 unreach:
323 	rtstat.rts_unreach++;
324 	if (generate_report)
325 		rt_dstmsg(RTM_MISS, dst, 0);
326 	return (NULL);
327 }
328 
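/*
 * Release a reference to a route.  If the route belongs to the current
 * cpu it is freed directly, otherwise the free is forwarded to the
 * owning cpu's route table thread.
 */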
329 void
330 rtfree(struct rtentry *rt)
331 {
332 	if (rt->rt_cpuid == mycpuid)
333 		rtfree_oncpu(rt);
334 	else
335 		rtfree_remote(rt, 1);
336 }
337 
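/*
 * Release a reference to a route owned by the current cpu.  When the
 * last reference is dropped and the route is no longer RTF_UP, the
 * rtentry and its key storage are deallocated.
 */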
338 void
339 rtfree_oncpu(struct rtentry *rt)
340 {
341 	KKASSERT(rt->rt_cpuid == mycpuid);
342 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
343 
344 	--rt->rt_refcnt;
345 	if (rt->rt_refcnt == 0) {
346 		struct radix_node_head *rnh =
347 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
348 
349 		if (rnh->rnh_close)
350 			rnh->rnh_close((struct radix_node *)rt, rnh);
351 		if (!(rt->rt_flags & RTF_UP)) {
352 			/* deallocate route */
353 			if (rt->rt_ifa != NULL)
354 				IFAFREE(rt->rt_ifa);
355 			if (rt->rt_parent != NULL)
356 				RTFREE(rt->rt_parent);	/* recursive call! */
357 			Free(rt_key(rt));
358 			Free(rt);
359 		}
360 	}
361 }
362 
363 static void
364 rtfree_remote_dispatch(struct netmsg *nmsg)
365 {
366 	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
367 	struct rtentry *rt = lmsg->u.ms_resultp;
368 
369 	rtfree_oncpu(rt);
370 	lwkt_replymsg(lmsg, 0);
371 }
372 
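/*
 * Free a route owned by another cpu by sending a message to that cpu's
 * route table thread.  This is normally a bug; complain, or panic when
 * owner-access assertions are enabled and the caller allows it.
 */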
373 void
374 rtfree_remote(struct rtentry *rt, int allow_panic)
375 {
376 	struct netmsg nmsg;
377 	struct lwkt_msg *lmsg;
378 
379 	KKASSERT(rt->rt_cpuid != mycpuid);
380 
381 	if (route_assert_owner_access && allow_panic) {
382 		panic("rt remote free rt_cpuid %d, mycpuid %d\n",
383 		      rt->rt_cpuid, mycpuid);
384 	} else {
385 		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
386 			rt->rt_cpuid, mycpuid);
387 		print_backtrace();
388 	}
389 
390 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
391 		    0, rtfree_remote_dispatch);
392 	lmsg = &nmsg.nm_lmsg;
393 	lmsg->u.ms_resultp = rt;
394 
395 	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
396 }
397 
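/*
 * Apply a routing redirect on the current cpu: verify that the new
 * gateway is directly reachable and that the redirect came from our
 * current router for the destination, then either create a dynamic
 * host route or modify the gateway of the existing route.
 */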
398 static int
399 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
400 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
401 {
402 	struct rtentry *rt = NULL;
403 	struct rt_addrinfo rtinfo;
404 	struct ifaddr *ifa;
405 	u_long *stat = NULL;
406 	int error;
407 
408 	/* verify the gateway is directly reachable */
409 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
410 		error = ENETUNREACH;
411 		goto out;
412 	}
413 
414 	/*
415 	 * If the redirect isn't from our current router for this destination,
416 	 * it's either old or wrong.
417 	 */
418 	if (!(flags & RTF_DONE) &&		/* XXX JH */
419 	    (rt = rtpurelookup(dst)) != NULL &&
420 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
421 		error = EINVAL;
422 		goto done;
423 	}
424 
425 	/*
426 	 * If it redirects us to ourselves, we have a routing loop,
427 	 * perhaps as a result of an interface going down recently.
428 	 */
429 	if (ifa_ifwithaddr(gateway)) {
430 		error = EHOSTUNREACH;
431 		goto done;
432 	}
433 
434 	/*
435 	 * Create a new entry if the lookup failed or if we got back
436 	 * a wildcard entry for the default route.  This is necessary
437 	 * for hosts which use routing redirects generated by smart
438 	 * gateways to dynamically build the routing tables.
439 	 */
440 	if (rt == NULL)
441 		goto create;
442 	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
443 		rtfree(rt);
444 		goto create;
445 	}
446 
447 	/* Ignore redirects for directly connected hosts. */
448 	if (!(rt->rt_flags & RTF_GATEWAY)) {
449 		error = EHOSTUNREACH;
450 		goto done;
451 	}
452 
453 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
454 		/*
455 		 * Changing from a network route to a host route.
456 		 * Create a new host route rather than smashing the
457 		 * network route.
458 		 */
459 create:
460 		flags |=  RTF_GATEWAY | RTF_DYNAMIC;
461 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
462 		rtinfo.rti_info[RTAX_DST] = dst;
463 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
464 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
465 		rtinfo.rti_flags = flags;
466 		rtinfo.rti_ifa = ifa;
467 		rt = NULL;	/* copy-in/copy-out parameter */
468 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
469 		if (rt != NULL)
470 			flags = rt->rt_flags;
471 		stat = &rtstat.rts_dynamic;
472 	} else {
473 		/*
474 		 * Smash the current notion of the gateway to this destination.
475 		 * Should check about netmask!!!
476 		 */
477 		rt->rt_flags |= RTF_MODIFIED;
478 		flags |= RTF_MODIFIED;
479 
480 		/* We only need to report rtmsg on CPU0 */
481 		rt_setgate(rt, rt_key(rt), gateway,
482 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
483 		error = 0;
484 		stat = &rtstat.rts_newgateway;
485 	}
486 
487 done:
488 	if (rt != NULL)
489 		rtfree(rt);
490 out:
491 	if (error != 0)
492 		rtstat.rts_badredirect++;
493 	else if (stat != NULL)
494 		(*stat)++;
495 
496 	return error;
497 }
498 
499 #ifdef SMP
500 
501 struct netmsg_rtredirect {
502 	struct netmsg	netmsg;
503 	struct sockaddr *dst;
504 	struct sockaddr *gateway;
505 	struct sockaddr *netmask;
506 	int		flags;
507 	struct sockaddr *src;
508 };
509 
510 #endif
511 
512 /*
513  * Force a routing table entry to the specified
514  * destination to go through the given gateway.
515  * Normally called as a result of a routing redirect
516  * message from the network layer.
517  *
518  * N.B.: must be called at splnet
519  */
520 void
521 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
522 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
523 {
524 	struct rt_addrinfo rtinfo;
525 	int error;
526 #ifdef SMP
527 	struct netmsg_rtredirect msg;
528 
529 	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
530 		    0, rtredirect_msghandler);
531 	msg.dst = dst;
532 	msg.gateway = gateway;
533 	msg.netmask = netmask;
534 	msg.flags = flags;
535 	msg.src = src;
536 	error = lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
537 #else
538 	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
539 #endif
540 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
541 	rtinfo.rti_info[RTAX_DST] = dst;
542 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
543 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
544 	rtinfo.rti_info[RTAX_AUTHOR] = src;
545 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
546 }
547 
548 #ifdef SMP
549 
550 static void
551 rtredirect_msghandler(struct netmsg *netmsg)
552 {
553 	struct netmsg_rtredirect *msg = (void *)netmsg;
554 	int nextcpu;
555 
556 	rtredirect_oncpu(msg->dst, msg->gateway, msg->netmask,
557 			 msg->flags, msg->src);
558 	nextcpu = mycpuid + 1;
559 	if (nextcpu < ncpus)
560 		lwkt_forwardmsg(rtable_portfn(nextcpu), &netmsg->nm_lmsg);
561 	else
562 		lwkt_replymsg(&netmsg->nm_lmsg, 0);
563 }
564 
565 #endif
566 
567 /*
568  * Routing table ioctl interface.
569  */
570 int
571 rtioctl(u_long req, caddr_t data, struct ucred *cred)
572 {
573 #ifdef INET
574 	/* Multicast goop, grrr... */
575 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
576 #else
577 	return ENXIO;
578 #endif
579 }
580 
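/*
 * Find the interface address to associate with a route to 'dst'
 * through 'gateway', falling back to a route lookup on the gateway
 * when no directly attached interface matches.
 */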
581 struct ifaddr *
582 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
583 {
584 	struct ifaddr *ifa;
585 
586 	if (!(flags & RTF_GATEWAY)) {
587 		/*
588 		 * If we are adding a route to an interface,
589 		 * and the interface is a point-to-point link,
590 		 * we should search for the destination
591 		 * as our clue to the interface.  Otherwise
592 		 * we can use the local address.
593 		 */
594 		ifa = NULL;
595 		if (flags & RTF_HOST) {
596 			ifa = ifa_ifwithdstaddr(dst);
597 		}
598 		if (ifa == NULL)
599 			ifa = ifa_ifwithaddr(gateway);
600 	} else {
601 		/*
602 		 * If we are adding a route to a remote net
603 		 * or host, the gateway may still be on the
604 		 * other end of a pt to pt link.
605 		 */
606 		ifa = ifa_ifwithdstaddr(gateway);
607 	}
608 	if (ifa == NULL)
609 		ifa = ifa_ifwithnet(gateway);
610 	if (ifa == NULL) {
611 		struct rtentry *rt;
612 
613 		rt = rtpurelookup(gateway);
614 		if (rt == NULL)
615 			return (NULL);
616 		rt->rt_refcnt--;
617 		if ((ifa = rt->rt_ifa) == NULL)
618 			return (NULL);
619 	}
620 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
621 		struct ifaddr *oldifa = ifa;
622 
623 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
624 		if (ifa == NULL)
625 			ifa = oldifa;
626 	}
627 	return (ifa);
628 }
629 
630 static int rt_fixdelete (struct radix_node *, void *);
631 static int rt_fixchange (struct radix_node *, void *);
632 
633 struct rtfc_arg {
634 	struct rtentry *rt0;
635 	struct radix_node_head *rnh;
636 };
637 
638 /*
639  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
640  */
641 int
642 rt_getifa(struct rt_addrinfo *rtinfo)
643 {
644 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
645 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
646 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
647 	int flags = rtinfo->rti_flags;
648 
649 	/*
650 	 * ifp may be specified by sockaddr_dl
651 	 * when protocol address is ambiguous.
652 	 */
653 	if (rtinfo->rti_ifp == NULL) {
654 		struct sockaddr *ifpaddr;
655 
656 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
657 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
658 			struct ifaddr *ifa;
659 
660 			ifa = ifa_ifwithnet(ifpaddr);
661 			if (ifa != NULL)
662 				rtinfo->rti_ifp = ifa->ifa_ifp;
663 		}
664 	}
665 
666 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
667 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
668 	if (rtinfo->rti_ifa == NULL) {
669 		struct sockaddr *sa;
670 
671 		sa = ifaaddr != NULL ? ifaaddr :
672 		    (gateway != NULL ? gateway : dst);
673 		if (sa != NULL && rtinfo->rti_ifp != NULL)
674 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
675 		else if (dst != NULL && gateway != NULL)
676 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
677 		else if (sa != NULL)
678 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
679 	}
680 	if (rtinfo->rti_ifa == NULL)
681 		return (ENETUNREACH);
682 
683 	if (rtinfo->rti_ifp == NULL)
684 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
685 	return (0);
686 }
687 
688 /*
689  * Do appropriate manipulations of a routing tree given
690  * all the bits of info needed
691  */
692 int
693 rtrequest(
694 	int req,
695 	struct sockaddr *dst,
696 	struct sockaddr *gateway,
697 	struct sockaddr *netmask,
698 	int flags,
699 	struct rtentry **ret_nrt)
700 {
701 	struct rt_addrinfo rtinfo;
702 
703 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
704 	rtinfo.rti_info[RTAX_DST] = dst;
705 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
706 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
707 	rtinfo.rti_flags = flags;
708 	return rtrequest1(req, &rtinfo, ret_nrt);
709 }
710 
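/*
 * As above, but replicate the request onto every cpu's route table and
 * discard the resulting rtentry.  For example (sketch only), a caller
 * adding a static host route might use:
 *
 *	error = rtrequest_global(RTM_ADD, dst, gateway, NULL,
 *				 RTF_HOST | RTF_GATEWAY | RTF_STATIC);
 */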
711 int
712 rtrequest_global(
713 	int req,
714 	struct sockaddr *dst,
715 	struct sockaddr *gateway,
716 	struct sockaddr *netmask,
717 	int flags)
718 {
719 	struct rt_addrinfo rtinfo;
720 
721 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
722 	rtinfo.rti_info[RTAX_DST] = dst;
723 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
724 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
725 	rtinfo.rti_flags = flags;
726 	return rtrequest1_global(req, &rtinfo, NULL, NULL);
727 }
728 
729 #ifdef SMP
730 
731 struct netmsg_rtq {
732 	struct netmsg		netmsg;
733 	int			req;
734 	struct rt_addrinfo	*rtinfo;
735 	rtrequest1_callback_func_t callback;
736 	void			*arg;
737 };
738 
739 #endif
740 
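/*
 * Issue a route table request on cpu 0 and chain it through the route
 * table threads of all cpus so that every per-cpu table sees the same
 * change.  The optional callback is invoked on each cpu with that
 * cpu's result.  On non-SMP kernels the request is handled directly.
 */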
741 int
742 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
743 		  rtrequest1_callback_func_t callback, void *arg)
744 {
745 	int error;
746 #ifdef SMP
747 	struct netmsg_rtq msg;
748 
749 	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
750 		    0, rtrequest1_msghandler);
751 	msg.netmsg.nm_lmsg.ms_error = -1;
752 	msg.req = req;
753 	msg.rtinfo = rtinfo;
754 	msg.callback = callback;
755 	msg.arg = arg;
756 	error = lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
757 #else
758 	struct rtentry *rt = NULL;
759 
760 	error = rtrequest1(req, rtinfo, &rt);
761 	if (rt)
762 		--rt->rt_refcnt;
763 	if (callback)
764 		callback(req, error, rtinfo, rt, arg);
765 #endif
766 	return (error);
767 }
768 
769 /*
770  * Handle a route table request on the current cpu.  Since the route tables
771  * are supposed to be identical on each cpu, an error occurring later in the
772  * message chain is considered system-fatal.
773  */
774 #ifdef SMP
775 
776 static void
777 rtrequest1_msghandler(struct netmsg *netmsg)
778 {
779 	struct netmsg_rtq *msg = (void *)netmsg;
780 	struct rt_addrinfo rtinfo;
781 	struct rtentry *rt = NULL;
782 	int nextcpu;
783 	int error;
784 
785 	/*
786 	 * Copy the rtinfo.  We need to make sure that the original
787 	 * rtinfo, which is setup by the caller, in the netmsg will
788 	 * _not_ be changed; else the next CPU on the netmsg forwarding
789 	 * path will see a different rtinfo than what this CPU has seen.
790 	 */
791 	rtinfo = *msg->rtinfo;
792 
793 	error = rtrequest1(msg->req, &rtinfo, &rt);
794 	if (rt)
795 		--rt->rt_refcnt;
796 	if (msg->callback)
797 		msg->callback(msg->req, error, &rtinfo, rt, msg->arg);
798 
799 	/*
800 	 * RTM_DELETEs are propagated even if an error occurs, since a
801 	 * cloned route might be undergoing deletion and cloned routes
802 	 * are not necessarily replicated.  An overall error is returned
803 	 * only if no cpus have the route in question.
804 	 */
805 	if (msg->netmsg.nm_lmsg.ms_error < 0 || error == 0)
806 		msg->netmsg.nm_lmsg.ms_error = error;
807 
808 	nextcpu = mycpuid + 1;
809 	if (error && msg->req != RTM_DELETE) {
810 		if (mycpuid != 0) {
811 			panic("rtrequest1_msghandler: rtrequest table "
812 			      "error was not on cpu #0");
813 		}
814 		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
815 	} else if (nextcpu < ncpus) {
816 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->netmsg.nm_lmsg);
817 	} else {
818 		lwkt_replymsg(&msg->netmsg.nm_lmsg,
819 			      msg->netmsg.nm_lmsg.ms_error);
820 	}
821 }
822 
823 #endif
824 
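/*
 * Apply a single routing request (RTM_ADD, RTM_RESOLVE or RTM_DELETE)
 * to the current cpu's route table.
 */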
825 int
826 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
827 {
828 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
829 	struct rtentry *rt;
830 	struct radix_node *rn;
831 	struct radix_node_head *rnh;
832 	struct ifaddr *ifa;
833 	struct sockaddr *ndst;
834 	boolean_t reportmsg;
835 	int error = 0;
836 
837 #define gotoerr(x) { error = x ; goto bad; }
838 
839 #ifdef ROUTE_DEBUG
840 	if (route_debug)
841 		rt_addrinfo_print(req, rtinfo);
842 #endif
843 
844 	crit_enter();
845 	/*
846 	 * Find the correct routing tree to use for this Address Family
847 	 */
848 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
849 		gotoerr(EAFNOSUPPORT);
850 
851 	/*
852 	 * If we are adding a host route then we don't want to put
853 	 * a netmask in the tree, nor do we want to clone it.
854 	 */
855 	if (rtinfo->rti_flags & RTF_HOST) {
856 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
857 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
858 	}
859 
860 	switch (req) {
861 	case RTM_DELETE:
862 		/* Remove the item from the tree. */
863 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
864 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
865 				      rnh);
866 		if (rn == NULL)
867 			gotoerr(ESRCH);
868 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
869 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
870 		rt = (struct rtentry *)rn;
871 
872 		/* ref to prevent a deletion race */
873 		++rt->rt_refcnt;
874 
875 		/* Free any routes cloned from this one. */
876 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
877 		    rt_mask(rt) != NULL) {
878 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
879 					       (char *)rt_mask(rt),
880 					       rt_fixdelete, rt);
881 		}
882 
883 		if (rt->rt_gwroute != NULL) {
884 			RTFREE(rt->rt_gwroute);
885 			rt->rt_gwroute = NULL;
886 		}
887 
888 		/*
889 		 * NB: RTF_UP must be set during the search above,
890 		 * because we might delete the last ref, causing
891 		 * rt to get freed prematurely.
892 		 */
893 		rt->rt_flags &= ~RTF_UP;
894 
895 #ifdef ROUTE_DEBUG
896 		if (route_debug)
897 			rt_print(rtinfo, rt);
898 #endif
899 
900 		/* Give the protocol a chance to keep things in sync. */
901 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
902 			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);
903 
904 		/*
905 		 * If the caller wants the rtentry, hand it over with the
906 		 * reference intact; the caller is then responsible for
907 		 * freeing it.
908 		 */
909 		KASSERT(rt->rt_refcnt >= 0,
910 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
911 		if (ret_nrt != NULL) {
912 			/* leave ref intact for return */
913 			*ret_nrt = rt;
914 		} else {
915 			/* deref / attempt to destroy */
916 			rtfree(rt);
917 		}
918 		break;
919 
920 	case RTM_RESOLVE:
921 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
922 			gotoerr(EINVAL);
923 		ifa = rt->rt_ifa;
924 		rtinfo->rti_flags =
925 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
926 		rtinfo->rti_flags |= RTF_WASCLONED;
927 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
928 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
929 			rtinfo->rti_flags |= RTF_HOST;
930 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
931 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
932 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
933 		goto makeroute;
934 
935 	case RTM_ADD:
936 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
937 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
938 		    ("rtrequest: GATEWAY but no gateway"));
939 
940 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
941 			gotoerr(error);
942 		ifa = rtinfo->rti_ifa;
943 makeroute:
944 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
945 		if (rt == NULL)
946 			gotoerr(ENOBUFS);
947 		bzero(rt, sizeof(struct rtentry));
948 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
949 		rt->rt_cpuid = mycpuid;
950 
951 		if (mycpuid != 0 && req == RTM_ADD) {
952 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
953 			reportmsg = RTL_DONTREPORT;
954 		} else {
955 			/*
956 			 * For RTM_ADD, we only send rtmsg on CPU0.
957 			 * For RTM_RESOLVE, we always send rtmsg. XXX
958 			 */
959 			reportmsg = RTL_REPORTMSG;
960 		}
961 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
962 				   reportmsg);
963 		if (error != 0) {
964 			Free(rt);
965 			gotoerr(error);
966 		}
967 
968 		ndst = rt_key(rt);
969 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
970 			rt_maskedcopy(dst, ndst,
971 				      rtinfo->rti_info[RTAX_NETMASK]);
972 		else
973 			bcopy(dst, ndst, dst->sa_len);
974 
975 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
976 			rt_setshims(rt, rtinfo->rti_info);
977 
978 		/*
979 		 * Note that we now have a reference to the ifa.
980 		 * This moved from below so that rnh->rnh_addaddr() can
981 		 * examine the ifa and ifa->ifa_ifp if it so desires.
982 		 */
983 		IFAREF(ifa);
984 		rt->rt_ifa = ifa;
985 		rt->rt_ifp = ifa->ifa_ifp;
986 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
987 
988 		rn = rnh->rnh_addaddr((char *)ndst,
989 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
990 				      rnh, rt->rt_nodes);
991 		if (rn == NULL) {
992 			struct rtentry *oldrt;
993 
994 			/*
995 			 * We already have one of these in the tree.
996 			 * We do a special hack: if the old route was
997 			 * cloned, then we blow it away and try
998 			 * re-inserting the new one.
999 			 */
1000 			oldrt = rtpurelookup(ndst);
1001 			if (oldrt != NULL) {
1002 				--oldrt->rt_refcnt;
1003 				if (oldrt->rt_flags & RTF_WASCLONED) {
1004 					rtrequest(RTM_DELETE, rt_key(oldrt),
1005 						  oldrt->rt_gateway,
1006 						  rt_mask(oldrt),
1007 						  oldrt->rt_flags, NULL);
1008 					rn = rnh->rnh_addaddr((char *)ndst,
1009 					    (char *)
1010 						rtinfo->rti_info[RTAX_NETMASK],
1011 					    rnh, rt->rt_nodes);
1012 				}
1013 			}
1014 		}
1015 
1016 		/*
1017 		 * If it still failed to go into the tree,
1018 		 * then un-make it (this should be a function).
1019 		 */
1020 		if (rn == NULL) {
1021 			if (rt->rt_gwroute != NULL)
1022 				rtfree(rt->rt_gwroute);
1023 			IFAFREE(ifa);
1024 			Free(rt_key(rt));
1025 			Free(rt);
1026 			gotoerr(EEXIST);
1027 		}
1028 
1029 		/*
1030 		 * If we got here from RESOLVE, then we are cloning
1031 		 * so clone the rest, and note that we
1032 		 * are a clone (and increment the parent's references)
1033 		 */
1034 		if (req == RTM_RESOLVE) {
1035 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
1036 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
1037 			if ((*ret_nrt)->rt_flags &
1038 				       (RTF_CLONING | RTF_PRCLONING)) {
1039 				rt->rt_parent = *ret_nrt;
1040 				(*ret_nrt)->rt_refcnt++;
1041 			}
1042 		}
1043 
1044 		/*
1045 		 * If this protocol has something to add to the route,
1046 		 * allow it to do that as well.
1047 		 */
1048 		if (ifa->ifa_rtrequest != NULL)
1049 			ifa->ifa_rtrequest(req, rt, rtinfo);
1050 
1051 		/*
1052 		 * We repeat the same procedure from rt_setgate() here because
1053 		 * it doesn't fire when we call it there because the node
1054 		 * hasn't been added to the tree yet.
1055 		 */
1056 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1057 		    rt_mask(rt) != NULL) {
1058 			struct rtfc_arg arg = { rt, rnh };
1059 
1060 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1061 					       (char *)rt_mask(rt),
1062 					       rt_fixchange, &arg);
1063 		}
1064 
1065 #ifdef ROUTE_DEBUG
1066 		if (route_debug)
1067 			rt_print(rtinfo, rt);
1068 #endif
1069 		/*
1070 		 * Return the resulting rtentry,
1071 		 * increasing the number of references by one.
1072 		 */
1073 		if (ret_nrt != NULL) {
1074 			rt->rt_refcnt++;
1075 			*ret_nrt = rt;
1076 		}
1077 		break;
1078 	default:
1079 		error = EOPNOTSUPP;
1080 	}
1081 bad:
1082 #ifdef ROUTE_DEBUG
1083 	if (route_debug) {
1084 		if (error)
1085 			kprintf("rti %p failed error %d\n", rtinfo, error);
1086 		else
1087 			kprintf("rti %p succeeded\n", rtinfo);
1088 	}
1089 #endif
1090 	crit_exit();
1091 	return (error);
1092 }
1093 
1094 /*
1095  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1096  * (i.e., the routes related to it by the operation of cloning).  This
1097  * routine is iterated over all potential former-child-routes by way of
1098  * rnh->rnh_walktree_from() above, and those that actually are children of
1099  * the late parent (passed in as VP here) are themselves deleted.
1100  */
1101 static int
1102 rt_fixdelete(struct radix_node *rn, void *vp)
1103 {
1104 	struct rtentry *rt = (struct rtentry *)rn;
1105 	struct rtentry *rt0 = vp;
1106 
1107 	if (rt->rt_parent == rt0 &&
1108 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1109 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1110 				 rt->rt_flags, NULL);
1111 	}
1112 	return 0;
1113 }
1114 
1115 /*
1116  * This routine is called from rt_setgate() to do the analogous thing for
1117  * adds and changes.  There is the added complication in this case of a
1118  * middle insert; i.e., insertion of a new network route between an older
1119  * network route and (cloned) host routes.  For this reason, a simple check
1120  * of rt->rt_parent is insufficient; each candidate route must be tested
1121  * against the (mask, value) of the new route (passed as before in vp)
1122  * to see if the new route matches it.
1123  *
1124  * XXX - it may be possible to do fixdelete() for changes and reserve this
1125  * routine just for adds.  I'm not sure why I thought it was necessary to do
1126  * changes this way.
1127  */
1128 #ifdef DEBUG
1129 static int rtfcdebug = 0;
1130 #endif
1131 
1132 static int
1133 rt_fixchange(struct radix_node *rn, void *vp)
1134 {
1135 	struct rtentry *rt = (struct rtentry *)rn;
1136 	struct rtfc_arg *ap = vp;
1137 	struct rtentry *rt0 = ap->rt0;
1138 	struct radix_node_head *rnh = ap->rnh;
1139 	u_char *xk1, *xm1, *xk2, *xmp;
1140 	int i, len, mlen;
1141 
1142 #ifdef DEBUG
1143 	if (rtfcdebug)
1144 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1145 #endif
1146 
1147 	if (rt->rt_parent == NULL ||
1148 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1149 #ifdef DEBUG
1150 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1151 #endif
1152 		return 0;
1153 	}
1154 
1155 	if (rt->rt_parent == rt0) {
1156 #ifdef DEBUG
1157 		if (rtfcdebug) kprintf("parent match\n");
1158 #endif
1159 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1160 				 rt->rt_flags, NULL);
1161 	}
1162 
1163 	/*
1164 	 * There probably is a function somewhere which does this...
1165 	 * if not, there should be.
1166 	 */
1167 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1168 
1169 	xk1 = (u_char *)rt_key(rt0);
1170 	xm1 = (u_char *)rt_mask(rt0);
1171 	xk2 = (u_char *)rt_key(rt);
1172 
1173 	/* avoid applying a less specific route */
1174 	xmp = (u_char *)rt_mask(rt->rt_parent);
1175 	mlen = rt_key(rt->rt_parent)->sa_len;
1176 	if (mlen > rt_key(rt0)->sa_len) {
1177 #ifdef DEBUG
1178 		if (rtfcdebug)
1179 			kprintf("rt_fixchange: inserting a less "
1180 			       "specific route\n");
1181 #endif
1182 		return 0;
1183 	}
1184 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1185 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1186 #ifdef DEBUG
1187 			if (rtfcdebug)
1188 				kprintf("rt_fixchange: inserting a less "
1189 				       "specific route\n");
1190 #endif
1191 			return 0;
1192 		}
1193 	}
1194 
1195 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1196 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1197 #ifdef DEBUG
1198 			if (rtfcdebug) kprintf("no match\n");
1199 #endif
1200 			return 0;
1201 		}
1202 	}
1203 
1204 	/*
1205 	 * OK, this node is a clone, and matches the node currently being
1206 	 * changed/added under the node's mask.  So, get rid of it.
1207 	 */
1208 #ifdef DEBUG
1209 	if (rtfcdebug) kprintf("deleting\n");
1210 #endif
1211 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1212 			 rt->rt_flags, NULL);
1213 }
1214 
1215 #define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
1216 
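/*
 * Set the gateway of a route.  The destination and gateway sockaddrs
 * share one allocation, dst first and the gateway at offset
 * ROUNDUP(dst->sa_len); the chunk is reallocated here when the new
 * gateway does not fit in the old one.
 */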
1217 int
1218 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1219 	   boolean_t generate_report)
1220 {
1221 	char *space, *oldspace;
1222 	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
1223 	struct rtentry *rt = rt0;
1224 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1225 
1226 	/*
1227 	 * A host route with the destination equal to the gateway
1228 	 * will interfere with keeping LLINFO in the routing
1229 	 * table, so disallow it.
1230 	 */
1231 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1232 			      (RTF_HOST | RTF_GATEWAY)) &&
1233 	    dst->sa_len == gate->sa_len &&
1234 	    sa_equal(dst, gate)) {
1235 		/*
1236 		 * The route might already exist if this is an RTM_CHANGE
1237 		 * or a routing redirect, so try to delete it.
1238 		 */
1239 		if (rt_key(rt0) != NULL)
1240 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1241 				  rt_mask(rt0), rt0->rt_flags, NULL);
1242 		return EADDRNOTAVAIL;
1243 	}
1244 
1245 	/*
1246 	 * Both dst and the gateway are stored in the same malloc'ed chunk.
1247 	 * If we need to malloc a new chunk, keep the old one around until
1248 	 * we are done with it, since the dst we copy from below may point
1249 	 * into it.
1250 	 */
1251 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1252 		oldspace = (char *)rt_key(rt);
1253 		R_Malloc(space, char *, dlen + glen);
1254 		if (space == NULL)
1255 			return ENOBUFS;
1256 		rt->rt_nodes->rn_key = space;
1257 	} else {
1258 		space = (char *)rt_key(rt);	/* Just use the old space. */
1259 		oldspace = NULL;
1260 	}
1261 
1262 	/* Set the gateway value. */
1263 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1264 	bcopy(gate, rt->rt_gateway, glen);
1265 
1266 	if (oldspace != NULL) {
1267 		/*
1268 		 * If we allocated a new chunk, preserve the original dst.
1269 		 * This way, rt_setgate() really just sets the gate
1270 		 * and leaves the dst field alone.
1271 		 */
1272 		bcopy(dst, space, dlen);
1273 		Free(oldspace);
1274 	}
1275 
1276 	/*
1277 	 * If there is already a gwroute, it's now almost definitely wrong
1278 	 * so drop it.
1279 	 */
1280 	if (rt->rt_gwroute != NULL) {
1281 		RTFREE(rt->rt_gwroute);
1282 		rt->rt_gwroute = NULL;
1283 	}
1284 	if (rt->rt_flags & RTF_GATEWAY) {
1285 		/*
1286 		 * Cloning loop avoidance: In the presence of
1287 		 * protocol-cloning and bad configuration, it is
1288 		 * possible to get stuck in bottomless mutual recursion
1289 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1290 		 * by not allowing protocol-cloning to operate for
1291 		 * gateways (which is probably the correct choice
1292 		 * anyway), and avoid the resulting reference loops
1293 		 * by disallowing any route to run through itself as
1294 		 * a gateway.  This is obviously mandatory when we
1295 		 * get rt->rt_output().
1296 		 *
1297 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1298 		 */
1299 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1300 					   RTF_PRCLONING);
1301 		if (rt->rt_gwroute == rt) {
1302 			rt->rt_gwroute = NULL;
1303 			--rt->rt_refcnt;
1304 			return EDQUOT; /* failure */
1305 		}
1306 	}
1307 
1308 	/*
1309 	 * This isn't going to do anything useful for host routes, so
1310 	 * don't bother.  Also make sure we have a reasonable mask
1311 	 * (we don't yet have one during adds).
1312 	 */
1313 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1314 		struct rtfc_arg arg = { rt, rnh };
1315 
1316 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1317 				       (char *)rt_mask(rt),
1318 				       rt_fixchange, &arg);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
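/*
 * Copy 'src' to 'dst', ANDing each data byte with the corresponding
 * byte of 'netmask' and zeroing anything beyond the mask length.
 */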
1324 static void
1325 rt_maskedcopy(
1326 	struct sockaddr *src,
1327 	struct sockaddr *dst,
1328 	struct sockaddr *netmask)
1329 {
1330 	u_char *cp1 = (u_char *)src;
1331 	u_char *cp2 = (u_char *)dst;
1332 	u_char *cp3 = (u_char *)netmask;
1333 	u_char *cplim = cp2 + *cp3;
1334 	u_char *cplim2 = cp2 + *cp1;
1335 
1336 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1337 	cp3 += 2;
1338 	if (cplim > cplim2)
1339 		cplim = cplim2;
1340 	while (cp2 < cplim)
1341 		*cp2++ = *cp1++ & *cp3++;
1342 	if (cp2 < cplim2)
1343 		bzero(cp2, cplim2 - cp2);
1344 }
1345 
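/*
 * Resolve the route actually used to transmit to 'dst': revalidate
 * rt0 if it is down, follow rt_gwroute for gateway routes (re-looking
 * it up when necessary), and honor RTF_REJECT with its expiry time.
 */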
1346 int
1347 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1348 {
1349 	struct rtentry *up_rt, *rt;
1350 
1351 	if (!(rt0->rt_flags & RTF_UP)) {
1352 		up_rt = rtlookup(dst);
1353 		if (up_rt == NULL)
1354 			return (EHOSTUNREACH);
1355 		up_rt->rt_refcnt--;
1356 	} else
1357 		up_rt = rt0;
1358 	if (up_rt->rt_flags & RTF_GATEWAY) {
1359 		if (up_rt->rt_gwroute == NULL) {
1360 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1361 			if (up_rt->rt_gwroute == NULL)
1362 				return (EHOSTUNREACH);
1363 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1364 			rtfree(up_rt->rt_gwroute);
1365 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1366 			if (up_rt->rt_gwroute == NULL)
1367 				return (EHOSTUNREACH);
1368 		}
1369 		rt = up_rt->rt_gwroute;
1370 	} else
1371 		rt = up_rt;
1372 	if (rt->rt_flags & RTF_REJECT &&
1373 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1374 	     time_second < rt->rt_rmx.rmx_expire))	/* rt not expired */
1375 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1376 	*drt = rt;
1377 	return 0;
1378 }
1379 
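/*
 * Copy up to three MPLS shim sockaddrs (RTAX_MPLS1..RTAX_MPLS3) into
 * the rtentry's rt_shim[] array.
 */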
1380 static int
1381 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
1382 	int i;
1383 
1384 	for (i = 0; i < 3; i++) {
1385 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1386 		int shimlen;
1387 
1388 		if (shim == NULL)
1389 			break;
1390 
1391 		shimlen = ROUNDUP(shim->sa_len);
1392 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1393 		bcopy(shim, rt->rt_shim[i], shimlen);
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 #ifdef ROUTE_DEBUG
1400 
1401 /*
1402  * Print out a route table entry
1403  */
1404 void
1405 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1406 {
1407 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1408 		rtinfo, mycpuid, rn, rn->rt_flags);
1409 	sockaddr_print(rt_key(rn));
1410 	kprintf(" mask ");
1411 	sockaddr_print(rt_mask(rn));
1412 	kprintf(" gw ");
1413 	sockaddr_print(rn->rt_gateway);
1414 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1415 	kprintf(" ifa %p\n", rn->rt_ifa);
1416 }
1417 
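/*
 * Print a routing command and the sockaddrs attached to its
 * rt_addrinfo for debugging.
 */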
1418 void
1419 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1420 {
1421 	int didit = 0;
1422 	int i;
1423 
1424 #ifdef ROUTE_DEBUG
1425 	if (cmd == RTM_DELETE && route_debug > 1)
1426 		print_backtrace();
1427 #endif
1428 
1429 	switch(cmd) {
1430 	case RTM_ADD:
1431 		kprintf("ADD ");
1432 		break;
1433 	case RTM_RESOLVE:
1434 		kprintf("RES ");
1435 		break;
1436 	case RTM_DELETE:
1437 		kprintf("DEL ");
1438 		break;
1439 	default:
1440 		kprintf("C%02d ", cmd);
1441 		break;
1442 	}
1443 	kprintf("rti %p cpu %d ", rti, mycpuid);
1444 	for (i = 0; i < rti->rti_addrs; ++i) {
1445 		if (rti->rti_info[i] == NULL)
1446 			continue;
1447 		if (didit)
1448 			kprintf(" ,");
1449 		switch(i) {
1450 		case RTAX_DST:
1451 			kprintf("(DST ");
1452 			break;
1453 		case RTAX_GATEWAY:
1454 			kprintf("(GWY ");
1455 			break;
1456 		case RTAX_NETMASK:
1457 			kprintf("(MSK ");
1458 			break;
1459 		case RTAX_GENMASK:
1460 			kprintf("(GEN ");
1461 			break;
1462 		case RTAX_IFP:
1463 			kprintf("(IFP ");
1464 			break;
1465 		case RTAX_IFA:
1466 			kprintf("(IFA ");
1467 			break;
1468 		case RTAX_AUTHOR:
1469 			kprintf("(AUT ");
1470 			break;
1471 		case RTAX_BRD:
1472 			kprintf("(BRD ");
1473 			break;
1474 		default:
1475 			kprintf("(?%02d ", i);
1476 			break;
1477 		}
1478 		sockaddr_print(rti->rti_info[i]);
1479 		kprintf(")");
1480 		didit = 1;
1481 	}
1482 	kprintf("\n");
1483 }
1484 
1485 void
1486 sockaddr_print(struct sockaddr *sa)
1487 {
1488 	struct sockaddr_in *sa4;
1489 	struct sockaddr_in6 *sa6;
1490 	int len;
1491 	int i;
1492 
1493 	if (sa == NULL) {
1494 		kprintf("NULL");
1495 		return;
1496 	}
1497 
1498 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1499 
1500 	switch(sa->sa_family) {
1501 	case AF_INET:
1502 	case AF_INET6:
1503 	default:
1504 		switch(sa->sa_family) {
1505 		case AF_INET:
1506 			sa4 = (struct sockaddr_in *)sa;
1507 			kprintf("INET %d %d.%d.%d.%d",
1508 				ntohs(sa4->sin_port),
1509 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1510 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1511 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1512 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1513 			);
1514 			break;
1515 		case AF_INET6:
1516 			sa6 = (struct sockaddr_in6 *)sa;
1517 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1518 				ntohs(sa6->sin6_port),
1519 				sa6->sin6_addr.s6_addr16[0],
1520 				sa6->sin6_addr.s6_addr16[1],
1521 				sa6->sin6_addr.s6_addr16[2],
1522 				sa6->sin6_addr.s6_addr16[3],
1523 				sa6->sin6_addr.s6_addr16[4],
1524 				sa6->sin6_addr.s6_addr16[5],
1525 				sa6->sin6_addr.s6_addr16[6],
1526 				sa6->sin6_addr.s6_addr16[7]
1527 			);
1528 			break;
1529 		default:
1530 			kprintf("AF%d ", sa->sa_family);
1531 			while (len > 0 && sa->sa_data[len-1] == 0)
1532 				--len;
1533 
1534 			for (i = 0; i < len; ++i) {
1535 				if (i)
1536 					kprintf(".");
1537 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1538 			}
1539 			break;
1540 		}
1541 	}
1542 }
1543 
1544 #endif
1545 
1546 /*
1547  * Set up a routing table entry, normally for an interface.
1548  */
1549 int
1550 rtinit(struct ifaddr *ifa, int cmd, int flags)
1551 {
1552 	struct sockaddr *dst, *deldst, *netmask;
1553 	struct mbuf *m = NULL;
1554 	struct radix_node_head *rnh;
1555 	struct radix_node *rn;
1556 	struct rt_addrinfo rtinfo;
1557 	int error;
1558 
1559 	if (flags & RTF_HOST) {
1560 		dst = ifa->ifa_dstaddr;
1561 		netmask = NULL;
1562 	} else {
1563 		dst = ifa->ifa_addr;
1564 		netmask = ifa->ifa_netmask;
1565 	}
1566 	/*
1567 	 * If it's a delete, check that the route, if it exists, is on the
1568 	 * correct interface; otherwise we might scrub a route to another
1569 	 * ifa, which would be confusing at best and possibly worse.
1570 	 */
1571 	if (cmd == RTM_DELETE) {
1572 		/*
1573 		 * It's a delete, so it should already exist.
1574 		 * If it's a net, mask off the host bits
1575 		 * (assuming we have a mask).
1576 		 */
1577 		if (netmask != NULL) {
1578 			m = m_get(MB_DONTWAIT, MT_SONAME);
1579 			if (m == NULL)
1580 				return (ENOBUFS);
1581 			mbuftrackid(m, 34);
1582 			deldst = mtod(m, struct sockaddr *);
1583 			rt_maskedcopy(dst, deldst, netmask);
1584 			dst = deldst;
1585 		}
1586 		/*
1587 		 * Look up an rtentry that is in the routing tree and
1588 		 * contains the correct info.
1589 		 */
1590 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1591 		    (rn = rnh->rnh_lookup((char *)dst,
1592 					  (char *)netmask, rnh)) == NULL ||
1593 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1594 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1595 			if (m != NULL)
1596 				m_free(m);
1597 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1598 		}
1599 		/* XXX */
1600 #if 0
1601 		else {
1602 			/*
1603 			 * One would think that as we are deleting, and we know
1604 			 * it doesn't exist, we could just return at this point
1605 			 * with an "ELSE" clause, but apparently not..
1606 			 */
1607 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1608 		}
1609 #endif
1610 	}
1611 	/*
1612 	 * Do the actual request
1613 	 */
1614 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1615 	rtinfo.rti_info[RTAX_DST] = dst;
1616 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1617 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1618 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1619 	rtinfo.rti_ifa = ifa;
1620 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
1621 	if (m != NULL)
1622 		m_free(m);
1623 	return (error);
1624 }
1625 
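/*
 * Callback for rtinit(): announce the change via the routing socket on
 * cpu 0 and, for deletions, release the route once its last reference
 * is gone.
 */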
1626 static void
1627 rtinit_rtrequest_callback(int cmd, int error,
1628 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1629 			  void *arg)
1630 {
1631 	struct ifaddr *ifa = arg;
1632 
1633 	if (error == 0 && rt) {
1634 		if (mycpuid == 0) {
1635 			++rt->rt_refcnt;
1636 			rt_newaddrmsg(cmd, ifa, error, rt);
1637 			--rt->rt_refcnt;
1638 		}
1639 		if (cmd == RTM_DELETE) {
1640 			if (rt->rt_refcnt == 0) {
1641 				++rt->rt_refcnt;
1642 				rtfree(rt);
1643 			}
1644 		}
1645 	}
1646 }
1647 
1648 struct netmsg_rts {
1649 	struct netmsg		netmsg;
1650 	int			req;
1651 	struct rt_addrinfo	*rtinfo;
1652 	rtsearch_callback_func_t callback;
1653 	void			*arg;
1654 	boolean_t		exact_match;
1655 	int			found_cnt;
1656 };
1657 
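/*
 * Search every cpu's route table for the route described by 'rtinfo'
 * and invoke 'callback' on each cpu where it is found.  With
 * 'exact_match' set, a host lookup must yield a host route (and a
 * network lookup a network route) for the route to count as found.
 */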
1658 int
1659 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1660 		rtsearch_callback_func_t callback, void *arg,
1661 		boolean_t exact_match)
1662 {
1663 	struct netmsg_rts msg;
1664 
1665 	netmsg_init(&msg.netmsg, NULL, &curthread->td_msgport,
1666 		    0, rtsearch_msghandler);
1667 	msg.req = req;
1668 	msg.rtinfo = rtinfo;
1669 	msg.callback = callback;
1670 	msg.arg = arg;
1671 	msg.exact_match = exact_match;
1672 	msg.found_cnt = 0;
1673 	return lwkt_domsg(rtable_portfn(0), &msg.netmsg.nm_lmsg, 0);
1674 }
1675 
1676 static void
1677 rtsearch_msghandler(struct netmsg *netmsg)
1678 {
1679 	struct netmsg_rts *msg = (void *)netmsg;
1680 	struct rt_addrinfo rtinfo;
1681 	struct radix_node_head *rnh;
1682 	struct rtentry *rt;
1683 	int nextcpu, error;
1684 
1685 	/*
1686 	 * Copy the rtinfo.  We need to make sure that the original
1687 	 * rtinfo, which is setup by the caller, in the netmsg will
1688 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1689 	 * path will see a different rtinfo than what this CPU has seen.
1690 	 */
1691 	rtinfo = *msg->rtinfo;
1692 
1693 	/*
1694 	 * Find the correct routing tree to use for this Address Family
1695 	 */
1696 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1697 		if (mycpuid != 0)
1698 			panic("partially initialized routing tables\n");
1699 		lwkt_replymsg(&msg->netmsg.nm_lmsg, EAFNOSUPPORT);
1700 		return;
1701 	}
1702 
1703 	/*
1704 	 * Correct rtinfo for the host route searching.
1705 	 */
1706 	if (rtinfo.rti_flags & RTF_HOST) {
1707 		rtinfo.rti_netmask = NULL;
1708 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1709 	}
1710 
1711 	rt = (struct rtentry *)
1712 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1713 			     (char *)rtinfo.rti_netmask, rnh);
1714 
1715 	/*
1716 	 * If we are asked to do the "exact match", we need to make sure
1717 	 * that host route searching got a host route while a network
1718 	 * route searching got a network route.
1719 	 */
1720 	if (rt != NULL && msg->exact_match &&
1721 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1722 		rt = NULL;
1723 
1724 	if (rt == NULL) {
1725 		/*
1726 		 * No matching route was found.  Don't count this as a
1727 		 * critical error (we set 'error' to 0 here); just keep
1728 		 * moving on, since prcloned routes in particular are not
1729 		 * duplicated onto each CPU.
1730 		 */
1731 		error = 0;
1732 	} else {
1733 		msg->found_cnt++;
1734 
1735 		rt->rt_refcnt++;
1736 		error = msg->callback(msg->req, &rtinfo, rt, msg->arg,
1737 				      msg->found_cnt);
1738 		rt->rt_refcnt--;
1739 
1740 		if (error == EJUSTRETURN) {
1741 			lwkt_replymsg(&msg->netmsg.nm_lmsg, 0);
1742 			return;
1743 		}
1744 	}
1745 
1746 	nextcpu = mycpuid + 1;
1747 	if (error) {
1748 		KKASSERT(msg->found_cnt > 0);
1749 
1750 		/*
1751 		 * In the following cases no unrecoverable error has
1752 		 * occurred:
1753 		 * o  Request is RTM_GET
1754 		 * o  The first time that we find the route, but the
1755 		 *    modification fails.
1756 		 */
1757 		if (msg->req != RTM_GET && msg->found_cnt > 1) {
1758 			panic("rtsearch_msghandler: unrecoverable error "
1759 			      "cpu %d", mycpuid);
1760 		}
1761 		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
1762 	} else if (nextcpu < ncpus) {
1763 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->netmsg.nm_lmsg);
1764 	} else {
1765 		if (msg->found_cnt == 0) {
1766 			/* The requested route was never seen ... */
1767 			error = ESRCH;
1768 		}
1769 		lwkt_replymsg(&msg->netmsg.nm_lmsg, error);
1770 	}
1771 }
1772 
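/*
 * Make sure 'mask' is present in the mask radix tree, chaining the
 * request through the route table threads of all cpus.
 */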
1773 int
1774 rtmask_add_global(struct sockaddr *mask)
1775 {
1776 	struct netmsg nmsg;
1777 
1778 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1779 		    0, rtmask_add_msghandler);
1780 	nmsg.nm_lmsg.u.ms_resultp = mask;
1781 
1782 	return lwkt_domsg(rtable_portfn(0), &nmsg.nm_lmsg, 0);
1783 }
1784 
1785 struct sockaddr *
1786 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1787 {
1788 	struct radix_node *n;
1789 
1790 #define	clen(s)	(*(u_char *)(s))
1791 	n = rn_addmask((char *)mask, search, 1);
1792 	if (n != NULL &&
1793 	    mask->sa_len >= clen(n->rn_key) &&
1794 	    bcmp((char *)mask + 1,
1795 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1796 		return (struct sockaddr *)n->rn_key;
1797 	} else {
1798 		return NULL;
1799 	}
1800 #undef clen
1801 }
1802 
1803 static void
1804 rtmask_add_msghandler(struct netmsg *nmsg)
1805 {
1806 	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1807 	struct sockaddr *mask = lmsg->u.ms_resultp;
1808 	int error = 0, nextcpu;
1809 
1810 	if (rtmask_lookup(mask) == NULL)
1811 		error = ENOBUFS;
1812 
1813 	nextcpu = mycpuid + 1;
1814 	if (!error && nextcpu < ncpus)
1815 		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
1816 	else
1817 		lwkt_replymsg(lmsg, error);
1818 }
1819 
1820 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1821 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1822