xref: /dragonfly/sys/net/route.c (revision 16fb0422)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)route.c	8.3 (Berkeley) 1/9/95
66  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
67  * $DragonFly: src/sys/net/route.c,v 1.41 2008/11/09 10:50:15 sephe Exp $
68  */
69 
70 #include "opt_inet.h"
71 #include "opt_mpls.h"
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/domain.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/globaldata.h>
82 #include <sys/thread.h>
83 
84 #include <net/if.h>
85 #include <net/route.h>
86 #include <net/netisr.h>
87 
88 #include <netinet/in.h>
89 #include <net/ip_mroute/ip_mroute.h>
90 
91 #include <sys/thread2.h>
92 #include <sys/msgport2.h>
93 #include <net/netmsg2.h>
94 
95 #ifdef MPLS
96 #include <netproto/mpls/mpls.h>
97 #endif
98 
99 static struct rtstatistics rtstatistics_percpu[MAXCPU];
100 #ifdef SMP
101 #define rtstat	rtstatistics_percpu[mycpuid]
102 #else
103 #define rtstat	rtstatistics_percpu[0]
104 #endif
105 
106 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
107 struct lwkt_port *rt_ports[MAXCPU];
108 
109 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
110 			       struct sockaddr *);
111 static void rtable_init(void);
112 static void rtable_service_loop(void *dummy);
113 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
114 				      struct rtentry *, void *);
115 
116 #ifdef SMP
117 static void rtredirect_msghandler(netmsg_t msg);
118 static void rtrequest1_msghandler(netmsg_t msg);
119 #endif
120 static void rtsearch_msghandler(netmsg_t msg);
121 static void rtmask_add_msghandler(netmsg_t msg);
122 
123 static int rt_setshims(struct rtentry *, struct sockaddr **);
124 
125 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
126 
127 #ifdef ROUTE_DEBUG
128 static int route_debug = 1;
129 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
130            &route_debug, 0, "");
131 #endif
132 
133 int route_assert_owner_access = 0;
134 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
135            &route_assert_owner_access, 0, "");
136 
137 u_long route_kmalloc_limit = 0;
138 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
139 
140 /*
141  * Initialize the route table(s) for protocol domains and
142  * create a helper thread which will be responsible for updating
143  * route table entries on each cpu.
144  */
145 void
146 route_init(void)
147 {
148 	int cpu;
149 	thread_t rtd;
150 
151 	for (cpu = 0; cpu < ncpus; ++cpu)
152 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
153 	rn_init();      /* initialize all zeroes, all ones, mask table */
154 	rtable_init();	/* call dom_rtattach() on each cpu */
155 
156 	for (cpu = 0; cpu < ncpus; cpu++) {
157 		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
158 			    0, cpu, "rtable_cpu %d", cpu);
159 		rt_ports[cpu] = &rtd->td_msgport;
160 	}
161 
162 	if (route_kmalloc_limit)
163 		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
164 }
165 
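/*
 * Attach each protocol domain's routing table on the current cpu, then
 * forward the message so the next cpu does the same.
 */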
166 static void
167 rtable_init_oncpu(netmsg_t msg)
168 {
169 	struct domain *dom;
170 	int cpu = mycpuid;
171 
172 	SLIST_FOREACH(dom, &domains, dom_next) {
173 		if (dom->dom_rtattach) {
174 			dom->dom_rtattach(
175 				(void **)&rt_tables[cpu][dom->dom_family],
176 			        dom->dom_rtoffset);
177 		}
178 	}
179 	ifnet_forwardmsg(&msg->lmsg, cpu + 1);
180 }
181 
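/*
 * Dispatch rtable_init_oncpu() across all cpus, starting at cpu #0.
 */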
182 static void
183 rtable_init(void)
184 {
185 	struct netmsg_base msg;
186 
187 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
188 	ifnet_domsg(&msg.lmsg, 0);
189 }
190 
191 /*
192  * Our per-cpu table management protocol thread.  All route table operations
193  * are sequentially chained through all cpus starting at cpu #0 in order to
194  * maintain duplicate route tables on each cpu.  Having a separate route
195  * table management thread allows the protocol and interrupt threads to
196  * issue route table changes.
197  */
198 static void
199 rtable_service_loop(void *dummy __unused)
200 {
201 	netmsg_base_t msg;
202 	thread_t td = curthread;
203 
204 	while ((msg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
205 		msg->nm_dispatch((netmsg_t)msg);
206 	}
207 }
208 
209 /*
210  * Routing statistics.
211  */
212 #ifdef SMP
213 static int
214 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
215 {
216 	int cpu, error = 0;
217 
218 	for (cpu = 0; cpu < ncpus; ++cpu) {
219 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
220 					sizeof(struct rtstatistics))))
221 				break;
222 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
223 					sizeof(struct rtstatistics))))
224 				break;
225 	}
226 
227 	return (error);
228 }
229 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
230 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
231 #else
232 SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
233 "Routing statistics");
234 #endif
235 
236 /*
237  * Packet routing routines.
238  */
239 
240 /*
241  * Look up and fill in the "ro_rt" rtentry field in a route structure given
242  * an address in the "ro_dst" field.  Always send a report on a miss and
243  * always clone routes.
244  */
245 void
246 rtalloc(struct route *ro)
247 {
248 	rtalloc_ign(ro, 0UL);
249 }
250 
251 /*
252  * Look up and fill in the "ro_rt" rtentry field in a route structure given
253  * an address in the "ro_dst" field.  Always send a report on a miss and
254  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
255  * ignored.
256  */
257 void
258 rtalloc_ign(struct route *ro, u_long ignoreflags)
259 {
260 	if (ro->ro_rt != NULL) {
261 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
262 			return;
263 		rtfree(ro->ro_rt);
264 		ro->ro_rt = NULL;
265 	}
266 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
267 }
268 
269 /*
270  * Look up the route that matches the given "dst" address.
271  *
272  * Route lookup can have the side-effect of creating and returning
273  * a cloned route instead when "dst" matches a cloning route and the
274  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
275  *
276  * Any route returned has its reference count incremented.
277  */
278 struct rtentry *
279 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
280 {
281 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
282 	struct rtentry *rt;
283 
284 	if (rnh == NULL)
285 		goto unreach;
286 
287 	/*
288 	 * Look up route in the radix tree.
289 	 */
290 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
291 	if (rt == NULL)
292 		goto unreach;
293 
294 	/*
295 	 * Handle cloning routes.
296 	 */
297 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
298 		struct rtentry *clonedroute;
299 		int error;
300 
301 		clonedroute = rt;	/* copy in/copy out parameter */
302 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
303 				  &clonedroute);	/* clone the route */
304 		if (error != 0) {	/* cloning failed */
305 			if (generate_report)
306 				rt_dstmsg(RTM_MISS, dst, error);
307 			rt->rt_refcnt++;
308 			return (rt);	/* return the uncloned route */
309 		}
310 		if (generate_report) {
311 			if (clonedroute->rt_flags & RTF_XRESOLVE)
312 				rt_dstmsg(RTM_RESOLVE, dst, 0);
313 			else
314 				rt_rtmsg(RTM_ADD, clonedroute,
315 					 clonedroute->rt_ifp, 0);
316 		}
317 		return (clonedroute);	/* return cloned route */
318 	}
319 
320 	/*
321 	 * Increment the reference count of the matched route and return.
322 	 */
323 	rt->rt_refcnt++;
324 	return (rt);
325 
326 unreach:
327 	rtstat.rts_unreach++;
328 	if (generate_report)
329 		rt_dstmsg(RTM_MISS, dst, 0);
330 	return (NULL);
331 }
332 
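/*
 * Release a reference on a route.  If the route is owned by the current
 * cpu it is freed directly, otherwise the free is forwarded to the owning
 * cpu's route table thread.
 */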
333 void
334 rtfree(struct rtentry *rt)
335 {
336 	if (rt->rt_cpuid == mycpuid)
337 		rtfree_oncpu(rt);
338 	else
339 		rtfree_remote(rt, 1);
340 }
341 
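/*
 * Drop a reference on a route owned by the current cpu.  When the last
 * reference goes away and the route is no longer RTF_UP, the rtentry and
 * its key storage are deallocated.
 */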
342 void
343 rtfree_oncpu(struct rtentry *rt)
344 {
345 	KKASSERT(rt->rt_cpuid == mycpuid);
346 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
347 
348 	--rt->rt_refcnt;
349 	if (rt->rt_refcnt == 0) {
350 		struct radix_node_head *rnh =
351 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
352 
353 		if (rnh->rnh_close)
354 			rnh->rnh_close((struct radix_node *)rt, rnh);
355 		if (!(rt->rt_flags & RTF_UP)) {
356 			/* deallocate route */
357 			if (rt->rt_ifa != NULL)
358 				IFAFREE(rt->rt_ifa);
359 			if (rt->rt_parent != NULL)
360 				RTFREE(rt->rt_parent);	/* recursive call! */
361 			Free(rt_key(rt));
362 			Free(rt);
363 		}
364 	}
365 }
366 
367 static void
368 rtfree_remote_dispatch(netmsg_t msg)
369 {
370 	struct lwkt_msg *lmsg = &msg->lmsg;
371 	struct rtentry *rt = lmsg->u.ms_resultp;
372 
373 	rtfree_oncpu(rt);
374 	lwkt_replymsg(lmsg, 0);
375 }
376 
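/*
 * Free a route owned by another cpu.  This is not supposed to happen in
 * normal operation, so complain (or panic if owner access assertions are
 * enabled and allow_panic is set), then forward the free to the owning
 * cpu's route table thread.
 */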
377 void
378 rtfree_remote(struct rtentry *rt, int allow_panic)
379 {
380 	struct netmsg_base msg;
381 	struct lwkt_msg *lmsg;
382 
383 	KKASSERT(rt->rt_cpuid != mycpuid);
384 
385 	if (route_assert_owner_access && allow_panic) {
386 		panic("rt remote free rt_cpuid %d, mycpuid %d\n",
387 		      rt->rt_cpuid, mycpuid);
388 	} else {
389 		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
390 			rt->rt_cpuid, mycpuid);
391 		print_backtrace(-1);
392 	}
393 
394 	netmsg_init(&msg, NULL, &curthread->td_msgport,
395 		    0, rtfree_remote_dispatch);
396 	lmsg = &msg.lmsg;
397 	lmsg->u.ms_resultp = rt;
398 
399 	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
400 }
401 
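/*
 * Apply a routing redirect on the current cpu: verify that the new gateway
 * is directly reachable and sane, then either create a new dynamic host
 * route or modify the gateway of the existing route.
 */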
402 static int
403 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
404 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
405 {
406 	struct rtentry *rt = NULL;
407 	struct rt_addrinfo rtinfo;
408 	struct ifaddr *ifa;
409 	u_long *stat = NULL;
410 	int error;
411 
412 	/* verify the gateway is directly reachable */
413 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
414 		error = ENETUNREACH;
415 		goto out;
416 	}
417 
418 	/*
419 	 * If the redirect isn't from our current router for this destination,
420 	 * it's either old or wrong.
421 	 */
422 	if (!(flags & RTF_DONE) &&		/* XXX JH */
423 	    (rt = rtpurelookup(dst)) != NULL &&
424 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
425 		error = EINVAL;
426 		goto done;
427 	}
428 
429 	/*
430 	 * If it redirects us to ourselves, we have a routing loop,
431 	 * perhaps as a result of an interface going down recently.
432 	 */
433 	if (ifa_ifwithaddr(gateway)) {
434 		error = EHOSTUNREACH;
435 		goto done;
436 	}
437 
438 	/*
439 	 * Create a new entry if the lookup failed or if we got back
440 	 * a wildcard entry for the default route.  This is necessary
441 	 * for hosts which use routing redirects generated by smart
442 	 * gateways to dynamically build the routing tables.
443 	 */
444 	if (rt == NULL)
445 		goto create;
446 	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
447 		rtfree(rt);
448 		goto create;
449 	}
450 
451 	/* Ignore redirects for directly connected hosts. */
452 	if (!(rt->rt_flags & RTF_GATEWAY)) {
453 		error = EHOSTUNREACH;
454 		goto done;
455 	}
456 
457 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
458 		/*
459 		 * Changing from a network route to a host route.
460 		 * Create a new host route rather than smashing the
461 		 * network route.
462 		 */
463 create:
464 		flags |=  RTF_GATEWAY | RTF_DYNAMIC;
465 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
466 		rtinfo.rti_info[RTAX_DST] = dst;
467 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
468 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
469 		rtinfo.rti_flags = flags;
470 		rtinfo.rti_ifa = ifa;
471 		rt = NULL;	/* copy-in/copy-out parameter */
472 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
473 		if (rt != NULL)
474 			flags = rt->rt_flags;
475 		stat = &rtstat.rts_dynamic;
476 	} else {
477 		/*
478 		 * Smash the current notion of the gateway to this destination.
479 		 * Should check about netmask!!!
480 		 */
481 		rt->rt_flags |= RTF_MODIFIED;
482 		flags |= RTF_MODIFIED;
483 
484 		/* We only need to report rtmsg on CPU0 */
485 		rt_setgate(rt, rt_key(rt), gateway,
486 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
487 		error = 0;
488 		stat = &rtstat.rts_newgateway;
489 	}
490 
491 done:
492 	if (rt != NULL)
493 		rtfree(rt);
494 out:
495 	if (error != 0)
496 		rtstat.rts_badredirect++;
497 	else if (stat != NULL)
498 		(*stat)++;
499 
500 	return error;
501 }
502 
503 #ifdef SMP
504 
505 struct netmsg_rtredirect {
506 	struct netmsg_base base;
507 	struct sockaddr *dst;
508 	struct sockaddr *gateway;
509 	struct sockaddr *netmask;
510 	int		flags;
511 	struct sockaddr *src;
512 };
513 
514 #endif
515 
516 /*
517  * Force a routing table entry to the specified
518  * destination to go through the given gateway.
519  * Normally called as a result of a routing redirect
520  * message from the network layer.
521  *
522  * N.B.: must be called at splnet
523  */
524 void
525 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
526 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
527 {
528 	struct rt_addrinfo rtinfo;
529 	int error;
530 #ifdef SMP
531 	struct netmsg_rtredirect msg;
532 
533 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
534 		    0, rtredirect_msghandler);
535 	msg.dst = dst;
536 	msg.gateway = gateway;
537 	msg.netmask = netmask;
538 	msg.flags = flags;
539 	msg.src = src;
540 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
541 #else
542 	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
543 #endif
544 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
545 	rtinfo.rti_info[RTAX_DST] = dst;
546 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
547 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
548 	rtinfo.rti_info[RTAX_AUTHOR] = src;
549 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
550 }
551 
552 #ifdef SMP
553 
554 static void
555 rtredirect_msghandler(netmsg_t msg)
556 {
557 	struct netmsg_rtredirect *rmsg = (void *)msg;
558 	int nextcpu;
559 
560 	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
561 			 rmsg->flags, rmsg->src);
562 	nextcpu = mycpuid + 1;
563 	if (nextcpu < ncpus)
564 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
565 	else
566 		lwkt_replymsg(&msg->lmsg, 0);
567 }
568 
569 #endif
570 
571 /*
572  * Routing table ioctl interface.
573  */
574 int
575 rtioctl(u_long req, caddr_t data, struct ucred *cred)
576 {
577 #ifdef INET
578 	/* Multicast goop, grrr... */
579 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
580 #else
581 	return ENXIO;
582 #endif
583 }
584 
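/*
 * Select the interface address to be used for a route to 'dst' through
 * 'gateway', based on the route flags.
 */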
585 struct ifaddr *
586 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
587 {
588 	struct ifaddr *ifa;
589 
590 	if (!(flags & RTF_GATEWAY)) {
591 		/*
592 		 * If we are adding a route to an interface,
593 		 * and the interface is a point-to-point link,
594 		 * we should search for the destination
595 		 * as our clue to the interface.  Otherwise
596 		 * we can use the local address.
597 		 */
598 		ifa = NULL;
599 		if (flags & RTF_HOST) {
600 			ifa = ifa_ifwithdstaddr(dst);
601 		}
602 		if (ifa == NULL)
603 			ifa = ifa_ifwithaddr(gateway);
604 	} else {
605 		/*
606 		 * If we are adding a route to a remote net
607 		 * or host, the gateway may still be on the
608 		 * other end of a pt to pt link.
609 		 */
610 		ifa = ifa_ifwithdstaddr(gateway);
611 	}
612 	if (ifa == NULL)
613 		ifa = ifa_ifwithnet(gateway);
614 	if (ifa == NULL) {
615 		struct rtentry *rt;
616 
617 		rt = rtpurelookup(gateway);
618 		if (rt == NULL)
619 			return (NULL);
620 		rt->rt_refcnt--;
621 		if ((ifa = rt->rt_ifa) == NULL)
622 			return (NULL);
623 	}
624 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
625 		struct ifaddr *oldifa = ifa;
626 
627 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
628 		if (ifa == NULL)
629 			ifa = oldifa;
630 	}
631 	return (ifa);
632 }
633 
634 static int rt_fixdelete (struct radix_node *, void *);
635 static int rt_fixchange (struct radix_node *, void *);
636 
637 struct rtfc_arg {
638 	struct rtentry *rt0;
639 	struct radix_node_head *rnh;
640 };
641 
642 /*
643  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
644  */
645 int
646 rt_getifa(struct rt_addrinfo *rtinfo)
647 {
648 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
649 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
650 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
651 	int flags = rtinfo->rti_flags;
652 
653 	/*
654 	 * ifp may be specified by sockaddr_dl
655 	 * when protocol address is ambiguous.
656 	 */
657 	if (rtinfo->rti_ifp == NULL) {
658 		struct sockaddr *ifpaddr;
659 
660 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
661 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
662 			struct ifaddr *ifa;
663 
664 			ifa = ifa_ifwithnet(ifpaddr);
665 			if (ifa != NULL)
666 				rtinfo->rti_ifp = ifa->ifa_ifp;
667 		}
668 	}
669 
670 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
671 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
672 	if (rtinfo->rti_ifa == NULL) {
673 		struct sockaddr *sa;
674 
675 		sa = ifaaddr != NULL ? ifaaddr :
676 		    (gateway != NULL ? gateway : dst);
677 		if (sa != NULL && rtinfo->rti_ifp != NULL)
678 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
679 		else if (dst != NULL && gateway != NULL)
680 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
681 		else if (sa != NULL)
682 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
683 	}
684 	if (rtinfo->rti_ifa == NULL)
685 		return (ENETUNREACH);
686 
687 	if (rtinfo->rti_ifp == NULL)
688 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
689 	return (0);
690 }
691 
692 /*
693  * Do appropriate manipulations of a routing tree given
694  * all the bits of info needed
695  */
696 int
697 rtrequest(
698 	int req,
699 	struct sockaddr *dst,
700 	struct sockaddr *gateway,
701 	struct sockaddr *netmask,
702 	int flags,
703 	struct rtentry **ret_nrt)
704 {
705 	struct rt_addrinfo rtinfo;
706 
707 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
708 	rtinfo.rti_info[RTAX_DST] = dst;
709 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
710 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
711 	rtinfo.rti_flags = flags;
712 	return rtrequest1(req, &rtinfo, ret_nrt);
713 }
714 
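/*
 * Same as rtrequest(), but the request is replicated on every cpu's
 * routing table via rtrequest1_global() and no rtentry is returned.
 */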
715 int
716 rtrequest_global(
717 	int req,
718 	struct sockaddr *dst,
719 	struct sockaddr *gateway,
720 	struct sockaddr *netmask,
721 	int flags)
722 {
723 	struct rt_addrinfo rtinfo;
724 
725 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
726 	rtinfo.rti_info[RTAX_DST] = dst;
727 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
728 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
729 	rtinfo.rti_flags = flags;
730 	return rtrequest1_global(req, &rtinfo, NULL, NULL);
731 }
732 
733 #ifdef SMP
734 
735 struct netmsg_rtq {
736 	struct netmsg_base	base;
737 	int			req;
738 	struct rt_addrinfo	*rtinfo;
739 	rtrequest1_callback_func_t callback;
740 	void			*arg;
741 };
742 
743 #endif
744 
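/*
 * Apply a routing table request to the routing table of every cpu.  The
 * request is chained through the per-cpu route table threads starting at
 * cpu #0; 'callback', if not NULL, is invoked with the per-cpu result.
 */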
745 int
746 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
747 		  rtrequest1_callback_func_t callback, void *arg)
748 {
749 	int error;
750 #ifdef SMP
751 	struct netmsg_rtq msg;
752 
753 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
754 		    0, rtrequest1_msghandler);
755 	msg.base.lmsg.ms_error = -1;
756 	msg.req = req;
757 	msg.rtinfo = rtinfo;
758 	msg.callback = callback;
759 	msg.arg = arg;
760 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
761 #else
762 	struct rtentry *rt = NULL;
763 
764 	error = rtrequest1(req, rtinfo, &rt);
765 	if (rt)
766 		--rt->rt_refcnt;
767 	if (callback)
768 		callback(req, error, rtinfo, rt, arg);
769 #endif
770 	return (error);
771 }
772 
773 /*
774  * Handle a route table request on the current cpu.  Since the route tables
775  * are supposed to be identical on each cpu, an error occurring later in the
776  * message chain is considered system-fatal.
777  */
778 #ifdef SMP
779 
780 static void
781 rtrequest1_msghandler(netmsg_t msg)
782 {
783 	struct netmsg_rtq *rmsg = (void *)msg;
784 	struct rt_addrinfo rtinfo;
785 	struct rtentry *rt = NULL;
786 	int nextcpu;
787 	int error;
788 
789 	/*
790 	 * Copy the rtinfo.  We need to make sure that the original
791 	 * rtinfo, which is setup by the caller, in the netmsg will
792 	 * _not_ be changed; else the next CPU on the netmsg forwarding
793 	 * path will see a different rtinfo than what this CPU has seen.
794 	 */
795 	rtinfo = *rmsg->rtinfo;
796 
797 	error = rtrequest1(rmsg->req, &rtinfo, &rt);
798 	if (rt)
799 		--rt->rt_refcnt;
800 	if (rmsg->callback)
801 		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
802 
803 	/*
804  * RTM_DELETEs are propagated even if an error occurs, since a
805 	 * cloned route might be undergoing deletion and cloned routes
806 	 * are not necessarily replicated.  An overall error is returned
807 	 * only if no cpus have the route in question.
808 	 */
809 	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
810 		rmsg->base.lmsg.ms_error = error;
811 
812 	nextcpu = mycpuid + 1;
813 	if (error && rmsg->req != RTM_DELETE) {
814 		if (mycpuid != 0) {
815 			panic("rtrequest1_msghandler: rtrequest table "
816 			      "error was cpu%d, err %d\n", mycpuid, error);
817 		}
818 		lwkt_replymsg(&rmsg->base.lmsg, error);
819 	} else if (nextcpu < ncpus) {
820 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
821 	} else {
822 		lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
823 	}
824 }
825 
826 #endif
827 
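/*
 * Perform a routing table request (RTM_ADD, RTM_DELETE or RTM_RESOLVE)
 * on the current cpu's routing table.  On success a referenced rtentry
 * is returned in *ret_nrt when ret_nrt is not NULL.
 */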
828 int
829 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
830 {
831 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
832 	struct rtentry *rt;
833 	struct radix_node *rn;
834 	struct radix_node_head *rnh;
835 	struct ifaddr *ifa;
836 	struct sockaddr *ndst;
837 	boolean_t reportmsg;
838 	int error = 0;
839 
840 #define gotoerr(x) { error = x ; goto bad; }
841 
842 #ifdef ROUTE_DEBUG
843 	if (route_debug)
844 		rt_addrinfo_print(req, rtinfo);
845 #endif
846 
847 	crit_enter();
848 	/*
849 	 * Find the correct routing tree to use for this Address Family
850 	 */
851 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
852 		gotoerr(EAFNOSUPPORT);
853 
854 	/*
855 	 * If we are adding a host route then we don't want to put
856 	 * a netmask in the tree, nor do we want to clone it.
857 	 */
858 	if (rtinfo->rti_flags & RTF_HOST) {
859 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
860 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
861 	}
862 
863 	switch (req) {
864 	case RTM_DELETE:
865 		/* Remove the item from the tree. */
866 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
867 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
868 				      rnh);
869 		if (rn == NULL)
870 			gotoerr(ESRCH);
871 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
872 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
873 		rt = (struct rtentry *)rn;
874 
875 		/* ref to prevent a deletion race */
876 		++rt->rt_refcnt;
877 
878 		/* Free any routes cloned from this one. */
879 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
880 		    rt_mask(rt) != NULL) {
881 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
882 					       (char *)rt_mask(rt),
883 					       rt_fixdelete, rt);
884 		}
885 
886 		if (rt->rt_gwroute != NULL) {
887 			RTFREE(rt->rt_gwroute);
888 			rt->rt_gwroute = NULL;
889 		}
890 
891 		/*
892 		 * NB: RTF_UP must be set during the search above,
893 		 * because we might delete the last ref, causing
894 		 * rt to get freed prematurely.
895 		 */
896 		rt->rt_flags &= ~RTF_UP;
897 
898 #ifdef ROUTE_DEBUG
899 		if (route_debug)
900 			rt_print(rtinfo, rt);
901 #endif
902 
903 		/* Give the protocol a chance to keep things in sync. */
904 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
905 			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);
906 
907 		/*
908 		 * If the caller wants it, then it can have it,
909 		 * but it's up to it to free the rtentry as we won't be
910 		 * doing it.
911 		 */
912 		KASSERT(rt->rt_refcnt >= 0,
913 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
914 		if (ret_nrt != NULL) {
915 			/* leave ref intact for return */
916 			*ret_nrt = rt;
917 		} else {
918 			/* deref / attempt to destroy */
919 			rtfree(rt);
920 		}
921 		break;
922 
923 	case RTM_RESOLVE:
924 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
925 			gotoerr(EINVAL);
926 		ifa = rt->rt_ifa;
927 		rtinfo->rti_flags =
928 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
929 		rtinfo->rti_flags |= RTF_WASCLONED;
930 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
931 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
932 			rtinfo->rti_flags |= RTF_HOST;
933 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
934 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
935 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
936 		goto makeroute;
937 
938 	case RTM_ADD:
939 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
940 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
941 		    ("rtrequest: GATEWAY but no gateway"));
942 
943 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
944 			gotoerr(error);
945 		ifa = rtinfo->rti_ifa;
946 makeroute:
947 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
948 		if (rt == NULL) {
949 			if (req == RTM_ADD) {
950 				kprintf("rtrequest1: alloc rtentry failed on "
951 				    "cpu%d\n", mycpuid);
952 			}
953 			gotoerr(ENOBUFS);
954 		}
955 		bzero(rt, sizeof(struct rtentry));
956 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
957 		rt->rt_cpuid = mycpuid;
958 
959 		if (mycpuid != 0 && req == RTM_ADD) {
960 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
961 			reportmsg = RTL_DONTREPORT;
962 		} else {
963 			/*
964 			 * For RTM_ADD, we only send rtmsg on CPU0.
965 			 * For RTM_RESOLVE, we always send rtmsg. XXX
966 			 */
967 			reportmsg = RTL_REPORTMSG;
968 		}
969 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
970 				   reportmsg);
971 		if (error != 0) {
972 			Free(rt);
973 			gotoerr(error);
974 		}
975 
976 		ndst = rt_key(rt);
977 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
978 			rt_maskedcopy(dst, ndst,
979 				      rtinfo->rti_info[RTAX_NETMASK]);
980 		else
981 			bcopy(dst, ndst, dst->sa_len);
982 
983 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
984 			rt_setshims(rt, rtinfo->rti_info);
985 
986 		/*
987 		 * Note that we now have a reference to the ifa.
988 		 * This moved from below so that rnh->rnh_addaddr() can
989  * examine the ifa and ifa->ifa_ifp if it so desires.
990 		 */
991 		IFAREF(ifa);
992 		rt->rt_ifa = ifa;
993 		rt->rt_ifp = ifa->ifa_ifp;
994 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
995 
996 		rn = rnh->rnh_addaddr((char *)ndst,
997 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
998 				      rnh, rt->rt_nodes);
999 		if (rn == NULL) {
1000 			struct rtentry *oldrt;
1001 
1002 			/*
1003 			 * We already have one of these in the tree.
1004 			 * We do a special hack: if the old route was
1005 			 * cloned, then we blow it away and try
1006 			 * re-inserting the new one.
1007 			 */
1008 			oldrt = rtpurelookup(ndst);
1009 			if (oldrt != NULL) {
1010 				--oldrt->rt_refcnt;
1011 				if (oldrt->rt_flags & RTF_WASCLONED) {
1012 					rtrequest(RTM_DELETE, rt_key(oldrt),
1013 						  oldrt->rt_gateway,
1014 						  rt_mask(oldrt),
1015 						  oldrt->rt_flags, NULL);
1016 					rn = rnh->rnh_addaddr((char *)ndst,
1017 					    (char *)
1018 						rtinfo->rti_info[RTAX_NETMASK],
1019 					    rnh, rt->rt_nodes);
1020 				}
1021 			}
1022 		}
1023 
1024 		/*
1025 		 * If it still failed to go into the tree,
1026 		 * then un-make it (this should be a function).
1027 		 */
1028 		if (rn == NULL) {
1029 			if (rt->rt_gwroute != NULL)
1030 				rtfree(rt->rt_gwroute);
1031 			IFAFREE(ifa);
1032 			Free(rt_key(rt));
1033 			Free(rt);
1034 			gotoerr(EEXIST);
1035 		}
1036 
1037 		/*
1038 		 * If we got here from RESOLVE, then we are cloning
1039 		 * so clone the rest, and note that we
1040 		 * are a clone (and increment the parent's references)
1041 		 */
1042 		if (req == RTM_RESOLVE) {
1043 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
1044 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
1045 			if ((*ret_nrt)->rt_flags &
1046 				       (RTF_CLONING | RTF_PRCLONING)) {
1047 				rt->rt_parent = *ret_nrt;
1048 				(*ret_nrt)->rt_refcnt++;
1049 			}
1050 		}
1051 
1052 		/*
1053 		 * If this protocol has something to add to this route,
1054 		 * allow it to do that as well.
1055 		 */
1056 		if (ifa->ifa_rtrequest != NULL)
1057 			ifa->ifa_rtrequest(req, rt, rtinfo);
1058 
1059 		/*
1060 		 * We repeat the same procedure from rt_setgate() here because
1061 		 * it doesn't fire there: the node hasn't been added to the
1062 		 * tree yet when rt_setgate() is called.
1063 		 */
1064 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1065 		    rt_mask(rt) != NULL) {
1066 			struct rtfc_arg arg = { rt, rnh };
1067 
1068 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1069 					       (char *)rt_mask(rt),
1070 					       rt_fixchange, &arg);
1071 		}
1072 
1073 #ifdef ROUTE_DEBUG
1074 		if (route_debug)
1075 			rt_print(rtinfo, rt);
1076 #endif
1077 		/*
1078 		 * Return the resulting rtentry,
1079 		 * increasing the number of references by one.
1080 		 */
1081 		if (ret_nrt != NULL) {
1082 			rt->rt_refcnt++;
1083 			*ret_nrt = rt;
1084 		}
1085 		break;
1086 	default:
1087 		error = EOPNOTSUPP;
1088 	}
1089 bad:
1090 #ifdef ROUTE_DEBUG
1091 	if (route_debug) {
1092 		if (error)
1093 			kprintf("rti %p failed error %d\n", rtinfo, error);
1094 		else
1095 			kprintf("rti %p succeeded\n", rtinfo);
1096 	}
1097 #endif
1098 	crit_exit();
1099 	return (error);
1100 }
1101 
1102 /*
1103  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1104  * (i.e., the routes related to it by the operation of cloning).  This
1105  * routine is iterated over all potential former-child-routes by way of
1106  * rnh->rnh_walktree_from() above, and those that actually are children of
1107  * the late parent (passed in as VP here) are themselves deleted.
1108  */
1109 static int
1110 rt_fixdelete(struct radix_node *rn, void *vp)
1111 {
1112 	struct rtentry *rt = (struct rtentry *)rn;
1113 	struct rtentry *rt0 = vp;
1114 
1115 	if (rt->rt_parent == rt0 &&
1116 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1117 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1118 				 rt->rt_flags, NULL);
1119 	}
1120 	return 0;
1121 }
1122 
1123 /*
1124  * This routine is called from rt_setgate() to do the analogous thing for
1125  * adds and changes.  There is the added complication in this case of a
1126  * middle insert; i.e., insertion of a new network route between an older
1127  * network route and (cloned) host routes.  For this reason, a simple check
1128  * of rt->rt_parent is insufficient; each candidate route must be tested
1129  * against the (mask, value) of the new route (passed as before in vp)
1130  * to see if the new route matches it.
1131  *
1132  * XXX - it may be possible to do fixdelete() for changes and reserve this
1133  * routine just for adds.  I'm not sure why I thought it was necessary to do
1134  * changes this way.
1135  */
1136 #ifdef DEBUG
1137 static int rtfcdebug = 0;
1138 #endif
1139 
1140 static int
1141 rt_fixchange(struct radix_node *rn, void *vp)
1142 {
1143 	struct rtentry *rt = (struct rtentry *)rn;
1144 	struct rtfc_arg *ap = vp;
1145 	struct rtentry *rt0 = ap->rt0;
1146 	struct radix_node_head *rnh = ap->rnh;
1147 	u_char *xk1, *xm1, *xk2, *xmp;
1148 	int i, len, mlen;
1149 
1150 #ifdef DEBUG
1151 	if (rtfcdebug)
1152 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1153 #endif
1154 
1155 	if (rt->rt_parent == NULL ||
1156 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1157 #ifdef DEBUG
1158 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1159 #endif
1160 		return 0;
1161 	}
1162 
1163 	if (rt->rt_parent == rt0) {
1164 #ifdef DEBUG
1165 		if (rtfcdebug) kprintf("parent match\n");
1166 #endif
1167 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1168 				 rt->rt_flags, NULL);
1169 	}
1170 
1171 	/*
1172 	 * There probably is a function somewhere which does this...
1173 	 * if not, there should be.
1174 	 */
1175 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1176 
1177 	xk1 = (u_char *)rt_key(rt0);
1178 	xm1 = (u_char *)rt_mask(rt0);
1179 	xk2 = (u_char *)rt_key(rt);
1180 
1181 	/* avoid applying a less specific route */
1182 	xmp = (u_char *)rt_mask(rt->rt_parent);
1183 	mlen = rt_key(rt->rt_parent)->sa_len;
1184 	if (mlen > rt_key(rt0)->sa_len) {
1185 #ifdef DEBUG
1186 		if (rtfcdebug)
1187 			kprintf("rt_fixchange: inserting a less "
1188 			       "specific route\n");
1189 #endif
1190 		return 0;
1191 	}
1192 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1193 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1194 #ifdef DEBUG
1195 			if (rtfcdebug)
1196 				kprintf("rt_fixchange: inserting a less "
1197 				       "specific route\n");
1198 #endif
1199 			return 0;
1200 		}
1201 	}
1202 
1203 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1204 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1205 #ifdef DEBUG
1206 			if (rtfcdebug) kprintf("no match\n");
1207 #endif
1208 			return 0;
1209 		}
1210 	}
1211 
1212 	/*
1213 	 * OK, this node is a clone, and matches the node currently being
1214 	 * changed/added under the node's mask.  So, get rid of it.
1215 	 */
1216 #ifdef DEBUG
1217 	if (rtfcdebug) kprintf("deleting\n");
1218 #endif
1219 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1220 			 rt->rt_flags, NULL);
1221 }
1222 
1223 #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
1224 
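/*
 * Set the gateway of a route.  The destination and gateway share one
 * malloc'ed chunk, which is reallocated if the new gateway does not fit;
 * rt_gwroute is (re)resolved for RTF_GATEWAY routes.
 */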
1225 int
1226 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1227 	   boolean_t generate_report)
1228 {
1229 	char *space, *oldspace;
1230 	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
1231 	struct rtentry *rt = rt0;
1232 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1233 
1234 	/*
1235 	 * A host route with the destination equal to the gateway
1236 	 * will interfere with keeping LLINFO in the routing
1237 	 * table, so disallow it.
1238 	 */
1239 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1240 			      (RTF_HOST | RTF_GATEWAY)) &&
1241 	    dst->sa_len == gate->sa_len &&
1242 	    sa_equal(dst, gate)) {
1243 		/*
1244 		 * The route might already exist if this is an RTM_CHANGE
1245 		 * or a routing redirect, so try to delete it.
1246 		 */
1247 		if (rt_key(rt0) != NULL)
1248 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1249 				  rt_mask(rt0), rt0->rt_flags, NULL);
1250 		return EADDRNOTAVAIL;
1251 	}
1252 
1253 	/*
1254 	 * Both dst and gateway are stored in the same malloc'ed chunk
1255 	 * (If I ever get my hands on....)
1256 	 * If we need to malloc a new chunk, keep the old one around
1257 	 * until we don't need it any more.
1258 	 */
1259 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1260 		oldspace = (char *)rt_key(rt);
1261 		R_Malloc(space, char *, dlen + glen);
1262 		if (space == NULL)
1263 			return ENOBUFS;
1264 		rt->rt_nodes->rn_key = space;
1265 	} else {
1266 		space = (char *)rt_key(rt);	/* Just use the old space. */
1267 		oldspace = NULL;
1268 	}
1269 
1270 	/* Set the gateway value. */
1271 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1272 	bcopy(gate, rt->rt_gateway, glen);
1273 
1274 	if (oldspace != NULL) {
1275 		/*
1276 		 * If we allocated a new chunk, preserve the original dst.
1277 		 * This way, rt_setgate() really just sets the gate
1278 		 * and leaves the dst field alone.
1279 		 */
1280 		bcopy(dst, space, dlen);
1281 		Free(oldspace);
1282 	}
1283 
1284 	/*
1285 	 * If there is already a gwroute, it's now almost definitely wrong
1286 	 * so drop it.
1287 	 */
1288 	if (rt->rt_gwroute != NULL) {
1289 		RTFREE(rt->rt_gwroute);
1290 		rt->rt_gwroute = NULL;
1291 	}
1292 	if (rt->rt_flags & RTF_GATEWAY) {
1293 		/*
1294 		 * Cloning loop avoidance: In the presence of
1295 		 * protocol-cloning and bad configuration, it is
1296 		 * possible to get stuck in bottomless mutual recursion
1297 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1298 		 * by not allowing protocol-cloning to operate for
1299 		 * gateways (which is probably the correct choice
1300 		 * anyway), and avoid the resulting reference loops
1301 		 * by disallowing any route to run through itself as
1302 		 * a gateway.  This is obviously mandatory when we
1303 		 * get rt->rt_output().
1304 		 *
1305 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1306 		 */
1307 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1308 					   RTF_PRCLONING);
1309 		if (rt->rt_gwroute == rt) {
1310 			rt->rt_gwroute = NULL;
1311 			--rt->rt_refcnt;
1312 			return EDQUOT; /* failure */
1313 		}
1314 	}
1315 
1316 	/*
1317 	 * This isn't going to do anything useful for host routes, so
1318 	 * don't bother.  Also make sure we have a reasonable mask
1319 	 * (we don't yet have one during adds).
1320 	 */
1321 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1322 		struct rtfc_arg arg = { rt, rnh };
1323 
1324 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1325 				       (char *)rt_mask(rt),
1326 				       rt_fixchange, &arg);
1327 	}
1328 
1329 	return 0;
1330 }
1331 
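/*
 * Copy 'src' to 'dst', copying sa_len and sa_family verbatim, masking the
 * remaining bytes with 'netmask' and zero filling the rest of 'dst'.
 */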
1332 static void
1333 rt_maskedcopy(
1334 	struct sockaddr *src,
1335 	struct sockaddr *dst,
1336 	struct sockaddr *netmask)
1337 {
1338 	u_char *cp1 = (u_char *)src;
1339 	u_char *cp2 = (u_char *)dst;
1340 	u_char *cp3 = (u_char *)netmask;
1341 	u_char *cplim = cp2 + *cp3;
1342 	u_char *cplim2 = cp2 + *cp1;
1343 
1344 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1345 	cp3 += 2;
1346 	if (cplim > cplim2)
1347 		cplim = cplim2;
1348 	while (cp2 < cplim)
1349 		*cp2++ = *cp1++ & *cp3++;
1350 	if (cp2 < cplim2)
1351 		bzero(cp2, cplim2 - cp2);
1352 }
1353 
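/*
 * Resolve the route to use for link-level output: revalidate 'rt0' if it
 * is down, follow rt_gwroute for gateway routes, and fail on RTF_REJECT
 * routes that have not expired.  The result is returned in *drt.
 */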
1354 int
1355 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1356 {
1357 	struct rtentry *up_rt, *rt;
1358 
1359 	if (!(rt0->rt_flags & RTF_UP)) {
1360 		up_rt = rtlookup(dst);
1361 		if (up_rt == NULL)
1362 			return (EHOSTUNREACH);
1363 		up_rt->rt_refcnt--;
1364 	} else
1365 		up_rt = rt0;
1366 	if (up_rt->rt_flags & RTF_GATEWAY) {
1367 		if (up_rt->rt_gwroute == NULL) {
1368 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1369 			if (up_rt->rt_gwroute == NULL)
1370 				return (EHOSTUNREACH);
1371 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1372 			rtfree(up_rt->rt_gwroute);
1373 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1374 			if (up_rt->rt_gwroute == NULL)
1375 				return (EHOSTUNREACH);
1376 		}
1377 		rt = up_rt->rt_gwroute;
1378 	} else
1379 		rt = up_rt;
1380 	if (rt->rt_flags & RTF_REJECT &&
1381 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1382 	     time_second < rt->rt_rmx.rmx_expire))	/* rt not expired */
1383 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1384 	*drt = rt;
1385 	return 0;
1386 }
1387 
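/*
 * Copy up to three MPLS shim sockaddrs from the request into the route's
 * rt_shim[] array.
 */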
1388 static int
1389 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
1390 	int i;
1391 
1392 	for (i=0; i<3; i++) {
1393 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1394 		int shimlen;
1395 
1396 		if (shim == NULL)
1397 			break;
1398 
1399 		shimlen = ROUNDUP(shim->sa_len);
1400 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1401 		bcopy(shim, rt->rt_shim[i], shimlen);
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 #ifdef ROUTE_DEBUG
1408 
1409 /*
1410  * Print out a route table entry
1411  */
1412 void
1413 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1414 {
1415 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1416 		rtinfo, mycpuid, rn, rn->rt_flags);
1417 	sockaddr_print(rt_key(rn));
1418 	kprintf(" mask ");
1419 	sockaddr_print(rt_mask(rn));
1420 	kprintf(" gw ");
1421 	sockaddr_print(rn->rt_gateway);
1422 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1423 	kprintf(" ifa %p\n", rn->rt_ifa);
1424 }
1425 
1426 void
1427 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1428 {
1429 	int didit = 0;
1430 	int i;
1431 
1432 #ifdef ROUTE_DEBUG
1433 	if (cmd == RTM_DELETE && route_debug > 1)
1434 		print_backtrace(-1);
1435 #endif
1436 
1437 	switch(cmd) {
1438 	case RTM_ADD:
1439 		kprintf("ADD ");
1440 		break;
1441 	case RTM_RESOLVE:
1442 		kprintf("RES ");
1443 		break;
1444 	case RTM_DELETE:
1445 		kprintf("DEL ");
1446 		break;
1447 	default:
1448 		kprintf("C%02d ", cmd);
1449 		break;
1450 	}
1451 	kprintf("rti %p cpu %d ", rti, mycpuid);
1452 	for (i = 0; i < rti->rti_addrs; ++i) {
1453 		if (rti->rti_info[i] == NULL)
1454 			continue;
1455 		if (didit)
1456 			kprintf(" ,");
1457 		switch(i) {
1458 		case RTAX_DST:
1459 			kprintf("(DST ");
1460 			break;
1461 		case RTAX_GATEWAY:
1462 			kprintf("(GWY ");
1463 			break;
1464 		case RTAX_NETMASK:
1465 			kprintf("(MSK ");
1466 			break;
1467 		case RTAX_GENMASK:
1468 			kprintf("(GEN ");
1469 			break;
1470 		case RTAX_IFP:
1471 			kprintf("(IFP ");
1472 			break;
1473 		case RTAX_IFA:
1474 			kprintf("(IFA ");
1475 			break;
1476 		case RTAX_AUTHOR:
1477 			kprintf("(AUT ");
1478 			break;
1479 		case RTAX_BRD:
1480 			kprintf("(BRD ");
1481 			break;
1482 		default:
1483 			kprintf("(?%02d ", i);
1484 			break;
1485 		}
1486 		sockaddr_print(rti->rti_info[i]);
1487 		kprintf(")");
1488 		didit = 1;
1489 	}
1490 	kprintf("\n");
1491 }
1492 
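/*
 * Print a sockaddr in a family-specific, human readable form.
 */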
1493 void
1494 sockaddr_print(struct sockaddr *sa)
1495 {
1496 	struct sockaddr_in *sa4;
1497 	struct sockaddr_in6 *sa6;
1498 	int len;
1499 	int i;
1500 
1501 	if (sa == NULL) {
1502 		kprintf("NULL");
1503 		return;
1504 	}
1505 
1506 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1507 
1508 	switch(sa->sa_family) {
1509 	case AF_INET:
1510 	case AF_INET6:
1511 	default:
1512 		switch(sa->sa_family) {
1513 		case AF_INET:
1514 			sa4 = (struct sockaddr_in *)sa;
1515 			kprintf("INET %d %d.%d.%d.%d",
1516 				ntohs(sa4->sin_port),
1517 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1518 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1519 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1520 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1521 			);
1522 			break;
1523 		case AF_INET6:
1524 			sa6 = (struct sockaddr_in6 *)sa;
1525 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1526 				ntohs(sa6->sin6_port),
1527 				sa6->sin6_addr.s6_addr16[0],
1528 				sa6->sin6_addr.s6_addr16[1],
1529 				sa6->sin6_addr.s6_addr16[2],
1530 				sa6->sin6_addr.s6_addr16[3],
1531 				sa6->sin6_addr.s6_addr16[4],
1532 				sa6->sin6_addr.s6_addr16[5],
1533 				sa6->sin6_addr.s6_addr16[6],
1534 				sa6->sin6_addr.s6_addr16[7]
1535 			);
1536 			break;
1537 		default:
1538 			kprintf("AF%d ", sa->sa_family);
1539 			while (len > 0 && sa->sa_data[len-1] == 0)
1540 				--len;
1541 
1542 			for (i = 0; i < len; ++i) {
1543 				if (i)
1544 					kprintf(".");
1545 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1546 			}
1547 			break;
1548 		}
1549 	}
1550 }
1551 
1552 #endif
1553 
1554 /*
1555  * Set up a routing table entry, normally for an interface.
1556  */
1557 int
1558 rtinit(struct ifaddr *ifa, int cmd, int flags)
1559 {
1560 	struct sockaddr *dst, *deldst, *netmask;
1561 	struct mbuf *m = NULL;
1562 	struct radix_node_head *rnh;
1563 	struct radix_node *rn;
1564 	struct rt_addrinfo rtinfo;
1565 	int error;
1566 
1567 	if (flags & RTF_HOST) {
1568 		dst = ifa->ifa_dstaddr;
1569 		netmask = NULL;
1570 	} else {
1571 		dst = ifa->ifa_addr;
1572 		netmask = ifa->ifa_netmask;
1573 	}
1574 	/*
1575 	 * If it's a delete, check that if it exists, it's on the correct
1576 	 * interface or we might scrub a route to another ifa which would
1577 	 * be confusing at best and possibly worse.
1578 	 */
1579 	if (cmd == RTM_DELETE) {
1580 		/*
1581 		 * It's a delete, so it should already exist..
1582 		 * If it's a net, mask off the host bits
1583 		 * (Assuming we have a mask)
1584 		 */
1585 		if (netmask != NULL) {
1586 			m = m_get(MB_DONTWAIT, MT_SONAME);
1587 			if (m == NULL)
1588 				return (ENOBUFS);
1589 			mbuftrackid(m, 34);
1590 			deldst = mtod(m, struct sockaddr *);
1591 			rt_maskedcopy(dst, deldst, netmask);
1592 			dst = deldst;
1593 		}
1594 		/*
1595 		 * Look up an rtentry that is in the routing tree and
1596 		 * contains the correct info.
1597 		 */
1598 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1599 		    (rn = rnh->rnh_lookup((char *)dst,
1600 					  (char *)netmask, rnh)) == NULL ||
1601 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1602 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1603 			if (m != NULL)
1604 				m_free(m);
1605 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1606 		}
1607 		/* XXX */
1608 #if 0
1609 		else {
1610 			/*
1611 			 * One would think that as we are deleting, and we know
1612 			 * it doesn't exist, we could just return at this point
1613 			 * with an "ELSE" clause, but apparently not..
1614 			 */
1615 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1616 		}
1617 #endif
1618 	}
1619 	/*
1620 	 * Do the actual request
1621 	 */
1622 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1623 	rtinfo.rti_info[RTAX_DST] = dst;
1624 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1625 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1626 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1627 	rtinfo.rti_ifa = ifa;
1628 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
1629 	if (m != NULL)
1630 		m_free(m);
1631 	return (error);
1632 }
1633 
1634 static void
1635 rtinit_rtrequest_callback(int cmd, int error,
1636 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1637 			  void *arg)
1638 {
1639 	struct ifaddr *ifa = arg;
1640 
1641 	if (error == 0 && rt) {
1642 		if (mycpuid == 0) {
1643 			++rt->rt_refcnt;
1644 			rt_newaddrmsg(cmd, ifa, error, rt);
1645 			--rt->rt_refcnt;
1646 		}
1647 		if (cmd == RTM_DELETE) {
1648 			if (rt->rt_refcnt == 0) {
1649 				++rt->rt_refcnt;
1650 				rtfree(rt);
1651 			}
1652 		}
1653 	}
1654 }
1655 
1656 struct netmsg_rts {
1657 	struct netmsg_base	base;
1658 	int			req;
1659 	struct rt_addrinfo	*rtinfo;
1660 	rtsearch_callback_func_t callback;
1661 	void			*arg;
1662 	boolean_t		exact_match;
1663 	int			found_cnt;
1664 };
1665 
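/*
 * Search every cpu's routing table for the route described by 'rtinfo'
 * and invoke 'callback' on each cpu where a matching route is found.
 * With 'exact_match' set, a host lookup only matches a host route and a
 * network lookup only matches a network route.  ESRCH is returned if the
 * route is not found on any cpu.
 */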
1666 int
1667 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1668 		rtsearch_callback_func_t callback, void *arg,
1669 		boolean_t exact_match)
1670 {
1671 	struct netmsg_rts msg;
1672 
1673 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1674 		    0, rtsearch_msghandler);
1675 	msg.req = req;
1676 	msg.rtinfo = rtinfo;
1677 	msg.callback = callback;
1678 	msg.arg = arg;
1679 	msg.exact_match = exact_match;
1680 	msg.found_cnt = 0;
1681 	return lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
1682 }
1683 
1684 static void
1685 rtsearch_msghandler(netmsg_t msg)
1686 {
1687 	struct netmsg_rts *rmsg = (void *)msg;
1688 	struct rt_addrinfo rtinfo;
1689 	struct radix_node_head *rnh;
1690 	struct rtentry *rt;
1691 	int nextcpu, error;
1692 
1693 	/*
1694 	 * Copy the rtinfo.  We need to make sure that the original
1695 	 * rtinfo, which is setup by the caller, in the netmsg will
1696 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1697 	 * path will see a different rtinfo than what this CPU has seen.
1698 	 */
1699 	rtinfo = *rmsg->rtinfo;
1700 
1701 	/*
1702 	 * Find the correct routing tree to use for this Address Family
1703 	 */
1704 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1705 		if (mycpuid != 0)
1706 			panic("partially initialized routing tables\n");
1707 		lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1708 		return;
1709 	}
1710 
1711 	/*
1712 	 * Correct rtinfo for the host route searching.
1713 	 */
1714 	if (rtinfo.rti_flags & RTF_HOST) {
1715 		rtinfo.rti_netmask = NULL;
1716 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1717 	}
1718 
1719 	rt = (struct rtentry *)
1720 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1721 			     (char *)rtinfo.rti_netmask, rnh);
1722 
1723 	/*
1724 	 * If we are asked to do the "exact match", we need to make sure
1725 	 * that a host route search got a host route and a network route
1726 	 * search got a network route.
1727 	 */
1728 	if (rt != NULL && rmsg->exact_match &&
1729 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1730 		rt = NULL;
1731 
1732 	if (rt == NULL) {
1733 		/*
1734 		 * No matching routes have been found, don't count this
1735 		 * as a critical error (here, we set 'error' to 0), just
1736 		 * keep moving on, since at least prcloned routes are not
1737 		 * duplicated onto each CPU.
1738 		 */
1739 		error = 0;
1740 	} else {
1741 		rmsg->found_cnt++;
1742 
1743 		rt->rt_refcnt++;
1744 		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1745 				      rmsg->found_cnt);
1746 		rt->rt_refcnt--;
1747 
1748 		if (error == EJUSTRETURN) {
1749 			lwkt_replymsg(&rmsg->base.lmsg, 0);
1750 			return;
1751 		}
1752 	}
1753 
1754 	nextcpu = mycpuid + 1;
1755 	if (error) {
1756 		KKASSERT(rmsg->found_cnt > 0);
1757 
1758 		/*
1759 		 * In the following cases an unrecoverable error has
1760 		 * not occurred:
1761 		 * o  Request is RTM_GET
1762 		 * o  The first time that we find the route, but the
1763 		 *    modification fails.
1764 		 */
1765 		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1766 			panic("rtsearch_msghandler: unrecoverable error "
1767 			      "cpu %d", mycpuid);
1768 		}
1769 		lwkt_replymsg(&rmsg->base.lmsg, error);
1770 	} else if (nextcpu < ncpus) {
1771 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
1772 	} else {
1773 		if (rmsg->found_cnt == 0) {
1774 			/* The requested route was never seen ... */
1775 			error = ESRCH;
1776 		}
1777 		lwkt_replymsg(&rmsg->base.lmsg, error);
1778 	}
1779 }
1780 
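/*
 * Enter 'mask' into the per-cpu mask radix tree of every cpu.
 */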
1781 int
1782 rtmask_add_global(struct sockaddr *mask)
1783 {
1784 	struct netmsg_base msg;
1785 
1786 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1787 		    0, rtmask_add_msghandler);
1788 	msg.lmsg.u.ms_resultp = mask;
1789 
1790 	return lwkt_domsg(rtable_portfn(0), &msg.lmsg, 0);
1791 }
1792 
1793 struct sockaddr *
1794 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1795 {
1796 	struct radix_node *n;
1797 
1798 #define	clen(s)	(*(u_char *)(s))
1799 	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1800 	if (n != NULL &&
1801 	    mask->sa_len >= clen(n->rn_key) &&
1802 	    bcmp((char *)mask + 1,
1803 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1804 		return (struct sockaddr *)n->rn_key;
1805 	} else {
1806 		return NULL;
1807 	}
1808 #undef clen
1809 }
1810 
1811 static void
1812 rtmask_add_msghandler(netmsg_t msg)
1813 {
1814 	struct lwkt_msg *lmsg = &msg->lmsg;
1815 	struct sockaddr *mask = lmsg->u.ms_resultp;
1816 	int error = 0, nextcpu;
1817 
1818 	if (rtmask_lookup(mask) == NULL)
1819 		error = ENOBUFS;
1820 
1821 	nextcpu = mycpuid + 1;
1822 	if (!error && nextcpu < ncpus)
1823 		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
1824 	else
1825 		lwkt_replymsg(lmsg, error);
1826 }
1827 
1828 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1829 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1830