xref: /dragonfly/sys/net/route.c (revision bd611623)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)route.c	8.3 (Berkeley) 1/9/95
66  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
67  */
68 
69 #include "opt_inet.h"
70 #include "opt_mpls.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/domain.h>
78 #include <sys/kernel.h>
79 #include <sys/sysctl.h>
80 #include <sys/globaldata.h>
81 #include <sys/thread.h>
82 
83 #include <net/if.h>
84 #include <net/route.h>
85 #include <net/netisr.h>
86 
87 #include <netinet/in.h>
88 #include <net/ip_mroute/ip_mroute.h>
89 
90 #include <sys/thread2.h>
91 #include <sys/msgport2.h>
92 #include <net/netmsg2.h>
93 
94 #ifdef MPLS
95 #include <netproto/mpls/mpls.h>
96 #endif
97 
98 static struct rtstatistics rtstatistics_percpu[MAXCPU];
99 #define rtstat	rtstatistics_percpu[mycpuid]
100 
101 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
102 struct lwkt_port *rt_ports[MAXCPU];
103 
104 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
105 			       struct sockaddr *);
106 static void rtable_init(void);
107 static void rtable_service_loop(void *dummy);
108 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
109 				      struct rtentry *, void *);
110 
111 static void rtredirect_msghandler(netmsg_t msg);
112 static void rtrequest1_msghandler(netmsg_t msg);
113 static void rtsearch_msghandler(netmsg_t msg);
114 static void rtmask_add_msghandler(netmsg_t msg);
115 
116 static int rt_setshims(struct rtentry *, struct sockaddr **);
117 
118 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
119 
120 #ifdef ROUTE_DEBUG
121 static int route_debug = 1;
122 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
123            &route_debug, 0, "");
124 #endif
125 
126 int route_assert_owner_access = 1;
127 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
128            &route_assert_owner_access, 0, "");
129 
130 u_long route_kmalloc_limit = 0;
131 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
132 
133 /*
134  * Initialize the route table(s) for protocol domains and
135  * create a helper thread which will be responsible for updating
136  * route table entries on each cpu.
137  */
138 void
139 route_init(void)
140 {
141 	int cpu;
142 	thread_t rtd;
143 
144 	for (cpu = 0; cpu < ncpus; ++cpu)
145 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
146 	rn_init();      /* initialize all zeroes, all ones, mask table */
147 	rtable_init();	/* call dom_rtattach() on each cpu */
148 
149 	for (cpu = 0; cpu < ncpus; cpu++) {
150 		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
151 			    0, cpu, "rtable_cpu %d", cpu);
152 		rt_ports[cpu] = &rtd->td_msgport;
153 	}
154 
155 	if (route_kmalloc_limit)
156 		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
157 }
158 
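/*
 * Attach the route table of each protocol domain on the current cpu
 * and then forward the message to the next cpu, so that every cpu
 * ends up with its own copy of the tables.
 */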
159 static void
160 rtable_init_oncpu(netmsg_t msg)
161 {
162 	struct domain *dom;
163 	int cpu = mycpuid;
164 
165 	SLIST_FOREACH(dom, &domains, dom_next) {
166 		if (dom->dom_rtattach) {
167 			dom->dom_rtattach(
168 				(void **)&rt_tables[cpu][dom->dom_family],
169 			        dom->dom_rtoffset);
170 		}
171 	}
172 	ifnet_forwardmsg(&msg->lmsg, cpu + 1);
173 }
174 
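/*
 * Dispatch rtable_init_oncpu() to cpu #0 and let it chain through
 * all cpus.
 */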
175 static void
176 rtable_init(void)
177 {
178 	struct netmsg_base msg;
179 
180 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
181 	ifnet_domsg(&msg.lmsg, 0);
182 }
183 
184 /*
185  * Our per-cpu table management protocol thread.  All route table operations
186  * are sequentially chained through all cpus starting at cpu #0 in order to
187  * maintain duplicate route tables on each cpu.  Having a separate route
188  * table management thread allows the protocol and interrupt threads to
189  * issue route table changes.
190  */
191 static void
192 rtable_service_loop(void *dummy __unused)
193 {
194 	netmsg_base_t msg;
195 	thread_t td = curthread;
196 
197 	while ((msg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
198 		msg->nm_dispatch((netmsg_t)msg);
199 	}
200 }
201 
202 /*
203  * Routing statistics.
204  */
205 static int
206 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
207 {
208 	int cpu, error = 0;
209 
210 	for (cpu = 0; cpu < ncpus; ++cpu) {
211 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
212 					sizeof(struct rtstatistics))))
213 				break;
214 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
215 					sizeof(struct rtstatistics))))
216 				break;
217 	}
218 
219 	return (error);
220 }
221 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
222 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
223 
224 /*
225  * Packet routing routines.
226  */
227 
228 /*
229  * Look up and fill in the "ro_rt" rtentry field in a route structure given
230  * an address in the "ro_dst" field.  Always send a report on a miss and
231  * always clone routes.
232  */
233 void
234 rtalloc(struct route *ro)
235 {
236 	rtalloc_ign(ro, 0UL);
237 }
238 
239 /*
240  * Look up and fill in the "ro_rt" rtentry field in a route structure given
241  * an address in the "ro_dst" field.  Always send a report on a miss and
242  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
243  * ignored.
244  */
245 void
246 rtalloc_ign(struct route *ro, u_long ignoreflags)
247 {
248 	if (ro->ro_rt != NULL) {
249 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
250 			return;
251 		rtfree(ro->ro_rt);
252 		ro->ro_rt = NULL;
253 	}
254 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
255 }
256 
257 /*
258  * Look up the route that matches the given "dst" address.
259  *
260  * Route lookup can have the side-effect of creating and returning
261  * a cloned route instead when "dst" matches a cloning route and the
262  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
263  *
264  * Any route returned has its reference count incremented.
265  */
266 struct rtentry *
267 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
268 {
269 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
270 	struct rtentry *rt;
271 
272 	if (rnh == NULL)
273 		goto unreach;
274 
275 	/*
276 	 * Look up route in the radix tree.
277 	 */
278 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
279 	if (rt == NULL)
280 		goto unreach;
281 
282 	/*
283 	 * Handle cloning routes.
284 	 */
285 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
286 		struct rtentry *clonedroute;
287 		int error;
288 
289 		clonedroute = rt;	/* copy in/copy out parameter */
290 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
291 				  &clonedroute);	/* clone the route */
292 		if (error != 0) {	/* cloning failed */
293 			if (generate_report)
294 				rt_dstmsg(RTM_MISS, dst, error);
295 			rt->rt_refcnt++;
296 			return (rt);	/* return the uncloned route */
297 		}
298 		if (generate_report) {
299 			if (clonedroute->rt_flags & RTF_XRESOLVE)
300 				rt_dstmsg(RTM_RESOLVE, dst, 0);
301 			else
302 				rt_rtmsg(RTM_ADD, clonedroute,
303 					 clonedroute->rt_ifp, 0);
304 		}
305 		return (clonedroute);	/* return cloned route */
306 	}
307 
308 	/*
309 	 * Increment the reference count of the matched route and return.
310 	 */
311 	rt->rt_refcnt++;
312 	return (rt);
313 
314 unreach:
315 	rtstat.rts_unreach++;
316 	if (generate_report)
317 		rt_dstmsg(RTM_MISS, dst, 0);
318 	return (NULL);
319 }
320 
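/*
 * Release a reference to a route.  If the current cpu owns the
 * rtentry the free is done directly, otherwise it is dispatched to
 * the owning cpu.
 */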
321 void
322 rtfree(struct rtentry *rt)
323 {
324 	if (rt->rt_cpuid == mycpuid)
325 		rtfree_oncpu(rt);
326 	else
327 		rtfree_remote(rt);
328 }
329 
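/*
 * Drop a reference on a route owned by the current cpu.  When the
 * last reference goes away and the route is no longer RTF_UP, the
 * rtentry and its key are deallocated.
 */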
330 void
331 rtfree_oncpu(struct rtentry *rt)
332 {
333 	KKASSERT(rt->rt_cpuid == mycpuid);
334 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
335 
336 	--rt->rt_refcnt;
337 	if (rt->rt_refcnt == 0) {
338 		struct radix_node_head *rnh =
339 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
340 
341 		if (rnh->rnh_close)
342 			rnh->rnh_close((struct radix_node *)rt, rnh);
343 		if (!(rt->rt_flags & RTF_UP)) {
344 			/* deallocate route */
345 			if (rt->rt_ifa != NULL)
346 				IFAFREE(rt->rt_ifa);
347 			if (rt->rt_parent != NULL)
348 				RTFREE(rt->rt_parent);	/* recursive call! */
349 			Free(rt_key(rt));
350 			Free(rt);
351 		}
352 	}
353 }
354 
355 static void
356 rtfree_remote_dispatch(netmsg_t msg)
357 {
358 	struct lwkt_msg *lmsg = &msg->lmsg;
359 	struct rtentry *rt = lmsg->u.ms_resultp;
360 
361 	rtfree_oncpu(rt);
362 	lwkt_replymsg(lmsg, 0);
363 }
364 
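/*
 * Free a route owned by another cpu.  This is a violation of the
 * per-cpu ownership rules; depending on route_assert_owner_access we
 * either panic or log a backtrace, then hand the rtentry to its
 * owning cpu for the actual free.
 */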
365 void
366 rtfree_remote(struct rtentry *rt)
367 {
368 	struct netmsg_base msg;
369 	struct lwkt_msg *lmsg;
370 
371 	KKASSERT(rt->rt_cpuid != mycpuid);
372 
373 	if (route_assert_owner_access) {
374 		panic("rt remote free rt_cpuid %d, mycpuid %d",
375 		      rt->rt_cpuid, mycpuid);
376 	} else {
377 		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
378 			rt->rt_cpuid, mycpuid);
379 		print_backtrace(-1);
380 	}
381 
382 	netmsg_init(&msg, NULL, &curthread->td_msgport,
383 		    0, rtfree_remote_dispatch);
384 	lmsg = &msg.lmsg;
385 	lmsg->u.ms_resultp = rt;
386 
387 	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
388 }
389 
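/*
 * Apply a routing redirect on the current cpu: verify that the new
 * gateway is directly reachable and that the redirect came from the
 * current router, then either create a new RTF_DYNAMIC host route or
 * modify the gateway of the existing route.
 */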
390 static int
391 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
392 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
393 {
394 	struct rtentry *rt = NULL;
395 	struct rt_addrinfo rtinfo;
396 	struct ifaddr *ifa;
397 	u_long *stat = NULL;
398 	int error;
399 
400 	/* verify the gateway is directly reachable */
401 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
402 		error = ENETUNREACH;
403 		goto out;
404 	}
405 
406 	/*
407 	 * If the redirect isn't from our current router for this destination,
408 	 * it's either old or wrong.
409 	 */
410 	if (!(flags & RTF_DONE) &&		/* XXX JH */
411 	    (rt = rtpurelookup(dst)) != NULL &&
412 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
413 		error = EINVAL;
414 		goto done;
415 	}
416 
417 	/*
418 	 * If it redirects us to ourselves, we have a routing loop,
419 	 * perhaps as a result of an interface going down recently.
420 	 */
421 	if (ifa_ifwithaddr(gateway)) {
422 		error = EHOSTUNREACH;
423 		goto done;
424 	}
425 
426 	/*
427 	 * Create a new entry if the lookup failed or if we got back
428 	 * a wildcard entry for the default route.  This is necessary
429 	 * for hosts which use routing redirects generated by smart
430 	 * gateways to dynamically build the routing tables.
431 	 */
432 	if (rt == NULL)
433 		goto create;
434 	if (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2) {
435 		rtfree(rt);
436 		goto create;
437 	}
438 
439 	/* Ignore redirects for directly connected hosts. */
440 	if (!(rt->rt_flags & RTF_GATEWAY)) {
441 		error = EHOSTUNREACH;
442 		goto done;
443 	}
444 
445 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
446 		/*
447 		 * Changing from a network route to a host route.
448 		 * Create a new host route rather than smashing the
449 		 * network route.
450 		 */
451 create:
452 		flags |=  RTF_GATEWAY | RTF_DYNAMIC;
453 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
454 		rtinfo.rti_info[RTAX_DST] = dst;
455 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
456 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
457 		rtinfo.rti_flags = flags;
458 		rtinfo.rti_ifa = ifa;
459 		rt = NULL;	/* copy-in/copy-out parameter */
460 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
461 		if (rt != NULL)
462 			flags = rt->rt_flags;
463 		stat = &rtstat.rts_dynamic;
464 	} else {
465 		/*
466 		 * Smash the current notion of the gateway to this destination.
467 		 * Should check about netmask!!!
468 		 */
469 		rt->rt_flags |= RTF_MODIFIED;
470 		flags |= RTF_MODIFIED;
471 
472 		/* We only need to report rtmsg on CPU0 */
473 		rt_setgate(rt, rt_key(rt), gateway,
474 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
475 		error = 0;
476 		stat = &rtstat.rts_newgateway;
477 	}
478 
479 done:
480 	if (rt != NULL)
481 		rtfree(rt);
482 out:
483 	if (error != 0)
484 		rtstat.rts_badredirect++;
485 	else if (stat != NULL)
486 		(*stat)++;
487 
488 	return error;
489 }
490 
491 struct netmsg_rtredirect {
492 	struct netmsg_base base;
493 	struct sockaddr *dst;
494 	struct sockaddr *gateway;
495 	struct sockaddr *netmask;
496 	int		flags;
497 	struct sockaddr *src;
498 };
499 
500 /*
501  * Force a routing table entry to the specified
502  * destination to go through the given gateway.
503  * Normally called as a result of a routing redirect
504  * message from the network layer.
505  *
506  * N.B.: must be called at splnet
507  */
508 void
509 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
510 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
511 {
512 	struct rt_addrinfo rtinfo;
513 	int error;
514 	struct netmsg_rtredirect msg;
515 
516 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
517 		    0, rtredirect_msghandler);
518 	msg.dst = dst;
519 	msg.gateway = gateway;
520 	msg.netmask = netmask;
521 	msg.flags = flags;
522 	msg.src = src;
523 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
524 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
525 	rtinfo.rti_info[RTAX_DST] = dst;
526 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
527 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
528 	rtinfo.rti_info[RTAX_AUTHOR] = src;
529 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
530 }
531 
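/*
 * Per-cpu handler for rtredirect(): apply the redirect locally, then
 * forward the message to the next cpu or reply when the last cpu has
 * been reached.
 */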
532 static void
533 rtredirect_msghandler(netmsg_t msg)
534 {
535 	struct netmsg_rtredirect *rmsg = (void *)msg;
536 	int nextcpu;
537 
538 	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
539 			 rmsg->flags, rmsg->src);
540 	nextcpu = mycpuid + 1;
541 	if (nextcpu < ncpus)
542 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
543 	else
544 		lwkt_replymsg(&msg->lmsg, 0);
545 }
546 
547 /*
548  * Routing table ioctl interface.
549  */
550 int
551 rtioctl(u_long req, caddr_t data, struct ucred *cred)
552 {
553 #ifdef INET
554 	/* Multicast goop, grrr... */
555 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
556 #else
557 	return ENXIO;
558 #endif
559 }
560 
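/*
 * Select the interface address (ifa) to associate with a route to
 * 'dst' through 'gateway', falling back to a route lookup of the
 * gateway when no direct interface match is found.
 */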
561 struct ifaddr *
562 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
563 {
564 	struct ifaddr *ifa;
565 
566 	if (!(flags & RTF_GATEWAY)) {
567 		/*
568 		 * If we are adding a route to an interface,
569 		 * and the interface is a point-to-point link,
570 		 * we should search for the destination
571 		 * as our clue to the interface.  Otherwise
572 		 * we can use the local address.
573 		 */
574 		ifa = NULL;
575 		if (flags & RTF_HOST) {
576 			ifa = ifa_ifwithdstaddr(dst);
577 		}
578 		if (ifa == NULL)
579 			ifa = ifa_ifwithaddr(gateway);
580 	} else {
581 		/*
582 		 * If we are adding a route to a remote net
583 		 * or host, the gateway may still be on the
584 		 * other end of a point-to-point link.
585 		 */
586 		ifa = ifa_ifwithdstaddr(gateway);
587 	}
588 	if (ifa == NULL)
589 		ifa = ifa_ifwithnet(gateway);
590 	if (ifa == NULL) {
591 		struct rtentry *rt;
592 
593 		rt = rtpurelookup(gateway);
594 		if (rt == NULL)
595 			return (NULL);
596 		rt->rt_refcnt--;
597 		if ((ifa = rt->rt_ifa) == NULL)
598 			return (NULL);
599 	}
600 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
601 		struct ifaddr *oldifa = ifa;
602 
603 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
604 		if (ifa == NULL)
605 			ifa = oldifa;
606 	}
607 	return (ifa);
608 }
609 
610 static int rt_fixdelete (struct radix_node *, void *);
611 static int rt_fixchange (struct radix_node *, void *);
612 
613 struct rtfc_arg {
614 	struct rtentry *rt0;
615 	struct radix_node_head *rnh;
616 };
617 
618 /*
619  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
620  */
621 int
622 rt_getifa(struct rt_addrinfo *rtinfo)
623 {
624 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
625 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
626 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
627 	int flags = rtinfo->rti_flags;
628 
629 	/*
630 	 * ifp may be specified by sockaddr_dl
631 	 * when protocol address is ambiguous.
632 	 */
633 	if (rtinfo->rti_ifp == NULL) {
634 		struct sockaddr *ifpaddr;
635 
636 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
637 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
638 			struct ifaddr *ifa;
639 
640 			ifa = ifa_ifwithnet(ifpaddr);
641 			if (ifa != NULL)
642 				rtinfo->rti_ifp = ifa->ifa_ifp;
643 		}
644 	}
645 
646 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
647 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
648 	if (rtinfo->rti_ifa == NULL) {
649 		struct sockaddr *sa;
650 
651 		sa = ifaaddr != NULL ? ifaaddr :
652 		    (gateway != NULL ? gateway : dst);
653 		if (sa != NULL && rtinfo->rti_ifp != NULL)
654 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
655 		else if (dst != NULL && gateway != NULL)
656 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
657 		else if (sa != NULL)
658 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
659 	}
660 	if (rtinfo->rti_ifa == NULL)
661 		return (ENETUNREACH);
662 
663 	if (rtinfo->rti_ifp == NULL)
664 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
665 	return (0);
666 }
667 
668 /*
669  * Do appropriate manipulations of a routing tree given
670  * all the bits of info needed
671  */
672 int
673 rtrequest(
674 	int req,
675 	struct sockaddr *dst,
676 	struct sockaddr *gateway,
677 	struct sockaddr *netmask,
678 	int flags,
679 	struct rtentry **ret_nrt)
680 {
681 	struct rt_addrinfo rtinfo;
682 
683 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
684 	rtinfo.rti_info[RTAX_DST] = dst;
685 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
686 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
687 	rtinfo.rti_flags = flags;
688 	return rtrequest1(req, &rtinfo, ret_nrt);
689 }
690 
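/*
 * Convenience wrapper around rtrequest1_global() which builds the
 * rt_addrinfo from discrete dst/gateway/netmask arguments, e.g.
 * (hypothetical caller):
 *
 *	rtrequest_global(RTM_ADD, dst, gateway, netmask, RTF_GATEWAY);
 */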
691 int
692 rtrequest_global(
693 	int req,
694 	struct sockaddr *dst,
695 	struct sockaddr *gateway,
696 	struct sockaddr *netmask,
697 	int flags)
698 {
699 	struct rt_addrinfo rtinfo;
700 
701 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
702 	rtinfo.rti_info[RTAX_DST] = dst;
703 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
704 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
705 	rtinfo.rti_flags = flags;
706 	return rtrequest1_global(req, &rtinfo, NULL, NULL);
707 }
708 
709 struct netmsg_rtq {
710 	struct netmsg_base	base;
711 	int			req;
712 	struct rt_addrinfo	*rtinfo;
713 	rtrequest1_callback_func_t callback;
714 	void			*arg;
715 };
716 
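/*
 * Issue a route table request on every cpu.  The message is sent to
 * cpu #0's route table thread and forwarded cpu by cpu by
 * rtrequest1_msghandler(); the optional callback is invoked with each
 * cpu's result.
 */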
717 int
718 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
719 		  rtrequest1_callback_func_t callback, void *arg)
720 {
721 	int error;
722 	struct netmsg_rtq msg;
723 
724 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
725 		    0, rtrequest1_msghandler);
726 	msg.base.lmsg.ms_error = -1;
727 	msg.req = req;
728 	msg.rtinfo = rtinfo;
729 	msg.callback = callback;
730 	msg.arg = arg;
731 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
732 	return (error);
733 }
734 
735 /*
736  * Handle a route table request on the current cpu.  Since the route tables
737  * are supposed to be identical on each cpu, an error occurring later in the
738  * message chain is considered system-fatal.
739  */
740 static void
741 rtrequest1_msghandler(netmsg_t msg)
742 {
743 	struct netmsg_rtq *rmsg = (void *)msg;
744 	struct rt_addrinfo rtinfo;
745 	struct rtentry *rt = NULL;
746 	int nextcpu;
747 	int error;
748 
749 	/*
750 	 * Copy the rtinfo.  We need to make sure that the original
751 	 * rtinfo, which is setup by the caller, in the netmsg will
752 	 * _not_ be changed; else the next CPU on the netmsg forwarding
753 	 * path will see a different rtinfo than what this CPU has seen.
754 	 */
755 	rtinfo = *rmsg->rtinfo;
756 
757 	error = rtrequest1(rmsg->req, &rtinfo, &rt);
758 	if (rt)
759 		--rt->rt_refcnt;
760 	if (rmsg->callback)
761 		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
762 
763 	/*
764  * RTM_DELETEs are propagated even if an error occurs, since a
765 	 * cloned route might be undergoing deletion and cloned routes
766 	 * are not necessarily replicated.  An overall error is returned
767 	 * only if no cpus have the route in question.
768 	 */
769 	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
770 		rmsg->base.lmsg.ms_error = error;
771 
772 	nextcpu = mycpuid + 1;
773 	if (error && rmsg->req != RTM_DELETE) {
774 		if (mycpuid != 0) {
775 			panic("rtrequest1_msghandler: rtrequest table "
776 			      "error was cpu%d, err %d\n", mycpuid, error);
777 		}
778 		lwkt_replymsg(&rmsg->base.lmsg, error);
779 	} else if (nextcpu < ncpus) {
780 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
781 	} else {
782 		lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
783 	}
784 }
785 
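/*
 * Perform a single routing table operation (RTM_ADD, RTM_DELETE or
 * RTM_RESOLVE) against the current cpu's radix tree.
 */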
786 int
787 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
788 {
789 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
790 	struct rtentry *rt;
791 	struct radix_node *rn;
792 	struct radix_node_head *rnh;
793 	struct ifaddr *ifa;
794 	struct sockaddr *ndst;
795 	boolean_t reportmsg;
796 	int error = 0;
797 
798 #define gotoerr(x) { error = x ; goto bad; }
799 
800 #ifdef ROUTE_DEBUG
801 	if (route_debug)
802 		rt_addrinfo_print(req, rtinfo);
803 #endif
804 
805 	crit_enter();
806 	/*
807 	 * Find the correct routing tree to use for this Address Family
808 	 */
809 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
810 		gotoerr(EAFNOSUPPORT);
811 
812 	/*
813 	 * If we are adding a host route then we don't want to put
814 	 * a netmask in the tree, nor do we want to clone it.
815 	 */
816 	if (rtinfo->rti_flags & RTF_HOST) {
817 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
818 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
819 	}
820 
821 	switch (req) {
822 	case RTM_DELETE:
823 		/* Remove the item from the tree. */
824 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
825 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
826 				      rnh);
827 		if (rn == NULL)
828 			gotoerr(ESRCH);
829 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
830 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
831 		rt = (struct rtentry *)rn;
832 
833 		/* ref to prevent a deletion race */
834 		++rt->rt_refcnt;
835 
836 		/* Free any routes cloned from this one. */
837 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
838 		    rt_mask(rt) != NULL) {
839 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
840 					       (char *)rt_mask(rt),
841 					       rt_fixdelete, rt);
842 		}
843 
844 		if (rt->rt_gwroute != NULL) {
845 			RTFREE(rt->rt_gwroute);
846 			rt->rt_gwroute = NULL;
847 		}
848 
849 		/*
850 		 * NB: RTF_UP must be set during the search above,
851 		 * because we might delete the last ref, causing
852 		 * rt to get freed prematurely.
853 		 */
854 		rt->rt_flags &= ~RTF_UP;
855 
856 #ifdef ROUTE_DEBUG
857 		if (route_debug)
858 			rt_print(rtinfo, rt);
859 #endif
860 
861 		/* Give the protocol a chance to keep things in sync. */
862 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
863 			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);
864 
865 		/*
866 		 * If the caller wants it, then it can have it,
867 		 * but it's up to it to free the rtentry as we won't be
868 		 * doing it.
869 		 */
870 		KASSERT(rt->rt_refcnt >= 0,
871 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
872 		if (ret_nrt != NULL) {
873 			/* leave ref intact for return */
874 			*ret_nrt = rt;
875 		} else {
876 			/* deref / attempt to destroy */
877 			rtfree(rt);
878 		}
879 		break;
880 
881 	case RTM_RESOLVE:
882 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
883 			gotoerr(EINVAL);
884 		ifa = rt->rt_ifa;
885 		rtinfo->rti_flags =
886 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
887 		rtinfo->rti_flags |= RTF_WASCLONED;
888 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
889 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
890 			rtinfo->rti_flags |= RTF_HOST;
891 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
892 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
893 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
894 		goto makeroute;
895 
896 	case RTM_ADD:
897 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
898 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
899 		    ("rtrequest: GATEWAY but no gateway"));
900 
901 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
902 			gotoerr(error);
903 		ifa = rtinfo->rti_ifa;
904 makeroute:
905 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
906 		if (rt == NULL) {
907 			if (req == RTM_ADD) {
908 				kprintf("rtrequest1: alloc rtentry failed on "
909 				    "cpu%d\n", mycpuid);
910 			}
911 			gotoerr(ENOBUFS);
912 		}
913 		bzero(rt, sizeof(struct rtentry));
914 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
915 		rt->rt_cpuid = mycpuid;
916 
917 		if (mycpuid != 0 && req == RTM_ADD) {
918 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
919 			reportmsg = RTL_DONTREPORT;
920 		} else {
921 			/*
922 			 * For RTM_ADD, we only send rtmsg on CPU0.
923 			 * For RTM_RESOLVE, we always send rtmsg. XXX
924 			 */
925 			reportmsg = RTL_REPORTMSG;
926 		}
927 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
928 				   reportmsg);
929 		if (error != 0) {
930 			Free(rt);
931 			gotoerr(error);
932 		}
933 
934 		ndst = rt_key(rt);
935 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
936 			rt_maskedcopy(dst, ndst,
937 				      rtinfo->rti_info[RTAX_NETMASK]);
938 		else
939 			bcopy(dst, ndst, dst->sa_len);
940 
941 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
942 			rt_setshims(rt, rtinfo->rti_info);
943 
944 		/*
945 		 * Note that we now have a reference to the ifa.
946 		 * This moved from below so that rnh->rnh_addaddr() can
947 		 * examine the ifa and ifa->ifa_ifp if it so desires.
948 		 */
949 		IFAREF(ifa);
950 		rt->rt_ifa = ifa;
951 		rt->rt_ifp = ifa->ifa_ifp;
952 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
953 
954 		rn = rnh->rnh_addaddr((char *)ndst,
955 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
956 				      rnh, rt->rt_nodes);
957 		if (rn == NULL) {
958 			struct rtentry *oldrt;
959 
960 			/*
961 			 * We already have one of these in the tree.
962 			 * We do a special hack: if the old route was
963 			 * cloned, then we blow it away and try
964 			 * re-inserting the new one.
965 			 */
966 			oldrt = rtpurelookup(ndst);
967 			if (oldrt != NULL) {
968 				--oldrt->rt_refcnt;
969 				if (oldrt->rt_flags & RTF_WASCLONED) {
970 					rtrequest(RTM_DELETE, rt_key(oldrt),
971 						  oldrt->rt_gateway,
972 						  rt_mask(oldrt),
973 						  oldrt->rt_flags, NULL);
974 					rn = rnh->rnh_addaddr((char *)ndst,
975 					    (char *)
976 						rtinfo->rti_info[RTAX_NETMASK],
977 					    rnh, rt->rt_nodes);
978 				}
979 			}
980 		}
981 
982 		/*
983 		 * If it still failed to go into the tree,
984 		 * then un-make it (this should be a function).
985 		 */
986 		if (rn == NULL) {
987 			if (rt->rt_gwroute != NULL)
988 				rtfree(rt->rt_gwroute);
989 			IFAFREE(ifa);
990 			Free(rt_key(rt));
991 			Free(rt);
992 			gotoerr(EEXIST);
993 		}
994 
995 		/*
996 		 * If we got here from RESOLVE, then we are cloning
997 		 * so clone the rest, and note that we
998 		 * are a clone (and increment the parent's references)
999 		 */
1000 		if (req == RTM_RESOLVE) {
1001 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
1002 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
1003 			if ((*ret_nrt)->rt_flags &
1004 				       (RTF_CLONING | RTF_PRCLONING)) {
1005 				rt->rt_parent = *ret_nrt;
1006 				(*ret_nrt)->rt_refcnt++;
1007 			}
1008 		}
1009 
1010 		/*
1011 		 * if this protocol has something to add to this then
1012 		 * allow it to do that as well.
1013 		 */
1014 		if (ifa->ifa_rtrequest != NULL)
1015 			ifa->ifa_rtrequest(req, rt, rtinfo);
1016 
1017 		/*
1018 		 * We repeat the same procedure from rt_setgate() here because
1019 		 * it doesn't fire when we call it there, since the node
1020 		 * hasn't been added to the tree yet.
1021 		 */
1022 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1023 		    rt_mask(rt) != NULL) {
1024 			struct rtfc_arg arg = { rt, rnh };
1025 
1026 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1027 					       (char *)rt_mask(rt),
1028 					       rt_fixchange, &arg);
1029 		}
1030 
1031 #ifdef ROUTE_DEBUG
1032 		if (route_debug)
1033 			rt_print(rtinfo, rt);
1034 #endif
1035 		/*
1036 		 * Return the resulting rtentry,
1037 		 * increasing the number of references by one.
1038 		 */
1039 		if (ret_nrt != NULL) {
1040 			rt->rt_refcnt++;
1041 			*ret_nrt = rt;
1042 		}
1043 		break;
1044 	default:
1045 		error = EOPNOTSUPP;
1046 	}
1047 bad:
1048 #ifdef ROUTE_DEBUG
1049 	if (route_debug) {
1050 		if (error)
1051 			kprintf("rti %p failed error %d\n", rtinfo, error);
1052 		else
1053 			kprintf("rti %p succeeded\n", rtinfo);
1054 	}
1055 #endif
1056 	crit_exit();
1057 	return (error);
1058 }
1059 
1060 /*
1061  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1062  * (i.e., the routes related to it by the operation of cloning).  This
1063  * routine is iterated over all potential former-child-routes by way of
1064  * rnh->rnh_walktree_from() above, and those that actually are children of
1065  * the late parent (passed in as VP here) are themselves deleted.
1066  */
1067 static int
1068 rt_fixdelete(struct radix_node *rn, void *vp)
1069 {
1070 	struct rtentry *rt = (struct rtentry *)rn;
1071 	struct rtentry *rt0 = vp;
1072 
1073 	if (rt->rt_parent == rt0 &&
1074 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1075 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1076 				 rt->rt_flags, NULL);
1077 	}
1078 	return 0;
1079 }
1080 
1081 /*
1082  * This routine is called from rt_setgate() to do the analogous thing for
1083  * adds and changes.  There is the added complication in this case of a
1084  * middle insert; i.e., insertion of a new network route between an older
1085  * network route and (cloned) host routes.  For this reason, a simple check
1086  * of rt->rt_parent is insufficient; each candidate route must be tested
1087  * against the (mask, value) of the new route (passed as before in vp)
1088  * to see if the new route matches it.
1089  *
1090  * XXX - it may be possible to do fixdelete() for changes and reserve this
1091  * routine just for adds.  I'm not sure why I thought it was necessary to do
1092  * changes this way.
1093  */
1094 #ifdef DEBUG
1095 static int rtfcdebug = 0;
1096 #endif
1097 
1098 static int
1099 rt_fixchange(struct radix_node *rn, void *vp)
1100 {
1101 	struct rtentry *rt = (struct rtentry *)rn;
1102 	struct rtfc_arg *ap = vp;
1103 	struct rtentry *rt0 = ap->rt0;
1104 	struct radix_node_head *rnh = ap->rnh;
1105 	u_char *xk1, *xm1, *xk2, *xmp;
1106 	int i, len, mlen;
1107 
1108 #ifdef DEBUG
1109 	if (rtfcdebug)
1110 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1111 #endif
1112 
1113 	if (rt->rt_parent == NULL ||
1114 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1115 #ifdef DEBUG
1116 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1117 #endif
1118 		return 0;
1119 	}
1120 
1121 	if (rt->rt_parent == rt0) {
1122 #ifdef DEBUG
1123 		if (rtfcdebug) kprintf("parent match\n");
1124 #endif
1125 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1126 				 rt->rt_flags, NULL);
1127 	}
1128 
1129 	/*
1130 	 * There probably is a function somewhere which does this...
1131 	 * if not, there should be.
1132 	 */
1133 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1134 
1135 	xk1 = (u_char *)rt_key(rt0);
1136 	xm1 = (u_char *)rt_mask(rt0);
1137 	xk2 = (u_char *)rt_key(rt);
1138 
1139 	/* avoid applying a less specific route */
1140 	xmp = (u_char *)rt_mask(rt->rt_parent);
1141 	mlen = rt_key(rt->rt_parent)->sa_len;
1142 	if (mlen > rt_key(rt0)->sa_len) {
1143 #ifdef DEBUG
1144 		if (rtfcdebug)
1145 			kprintf("rt_fixchange: inserting a less "
1146 			       "specific route\n");
1147 #endif
1148 		return 0;
1149 	}
1150 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1151 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1152 #ifdef DEBUG
1153 			if (rtfcdebug)
1154 				kprintf("rt_fixchange: inserting a less "
1155 				       "specific route\n");
1156 #endif
1157 			return 0;
1158 		}
1159 	}
1160 
1161 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1162 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1163 #ifdef DEBUG
1164 			if (rtfcdebug) kprintf("no match\n");
1165 #endif
1166 			return 0;
1167 		}
1168 	}
1169 
1170 	/*
1171 	 * OK, this node is a clone, and matches the node currently being
1172 	 * changed/added under the node's mask.  So, get rid of it.
1173 	 */
1174 #ifdef DEBUG
1175 	if (rtfcdebug) kprintf("deleting\n");
1176 #endif
1177 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1178 			 rt->rt_flags, NULL);
1179 }
1180 
1181 #define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
1182 
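/*
 * Set the gateway of a route.  The destination key and the gateway
 * share one malloc'ed chunk; if the new gateway does not fit, a new
 * chunk is allocated and the old one freed.  For RTF_GATEWAY routes
 * the gateway route (rt_gwroute) is also re-resolved.
 */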
1183 int
1184 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1185 	   boolean_t generate_report)
1186 {
1187 	char *space, *oldspace;
1188 	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
1189 	struct rtentry *rt = rt0;
1190 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1191 
1192 	/*
1193 	 * A host route with the destination equal to the gateway
1194 	 * will interfere with keeping LLINFO in the routing
1195 	 * table, so disallow it.
1196 	 */
1197 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1198 			      (RTF_HOST | RTF_GATEWAY)) &&
1199 	    dst->sa_len == gate->sa_len &&
1200 	    sa_equal(dst, gate)) {
1201 		/*
1202 		 * The route might already exist if this is an RTM_CHANGE
1203 		 * or a routing redirect, so try to delete it.
1204 		 */
1205 		if (rt_key(rt0) != NULL)
1206 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1207 				  rt_mask(rt0), rt0->rt_flags, NULL);
1208 		return EADDRNOTAVAIL;
1209 	}
1210 
1211 	/*
1212 	 * Both dst and gateway are stored in the same malloc'ed chunk
1213 	 * (If I ever get my hands on....)
1214 	 * If we need to malloc a new chunk, then keep the old one around
1215 	 * until we don't need it any more.
1216 	 */
1217 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1218 		oldspace = (char *)rt_key(rt);
1219 		R_Malloc(space, char *, dlen + glen);
1220 		if (space == NULL)
1221 			return ENOBUFS;
1222 		rt->rt_nodes->rn_key = space;
1223 	} else {
1224 		space = (char *)rt_key(rt);	/* Just use the old space. */
1225 		oldspace = NULL;
1226 	}
1227 
1228 	/* Set the gateway value. */
1229 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1230 	bcopy(gate, rt->rt_gateway, glen);
1231 
1232 	if (oldspace != NULL) {
1233 		/*
1234 		 * If we allocated a new chunk, preserve the original dst.
1235 		 * This way, rt_setgate() really just sets the gate
1236 		 * and leaves the dst field alone.
1237 		 */
1238 		bcopy(dst, space, dlen);
1239 		Free(oldspace);
1240 	}
1241 
1242 	/*
1243 	 * If there is already a gwroute, it's now almost definitely wrong
1244 	 * so drop it.
1245 	 */
1246 	if (rt->rt_gwroute != NULL) {
1247 		RTFREE(rt->rt_gwroute);
1248 		rt->rt_gwroute = NULL;
1249 	}
1250 	if (rt->rt_flags & RTF_GATEWAY) {
1251 		/*
1252 		 * Cloning loop avoidance: In the presence of
1253 		 * protocol-cloning and bad configuration, it is
1254 		 * possible to get stuck in bottomless mutual recursion
1255 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1256 		 * by not allowing protocol-cloning to operate for
1257 		 * gateways (which is probably the correct choice
1258 		 * anyway), and avoid the resulting reference loops
1259 		 * by disallowing any route to run through itself as
1260 		 * a gateway.  This is obviously mandatory when we
1261 		 * get rt->rt_output().
1262 		 *
1263 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1264 		 */
1265 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1266 					   RTF_PRCLONING);
1267 		if (rt->rt_gwroute == rt) {
1268 			rt->rt_gwroute = NULL;
1269 			--rt->rt_refcnt;
1270 			return EDQUOT; /* failure */
1271 		}
1272 	}
1273 
1274 	/*
1275 	 * This isn't going to do anything useful for host routes, so
1276 	 * don't bother.  Also make sure we have a reasonable mask
1277 	 * (we don't yet have one during adds).
1278 	 */
1279 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1280 		struct rtfc_arg arg = { rt, rnh };
1281 
1282 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1283 				       (char *)rt_mask(rt),
1284 				       rt_fixchange, &arg);
1285 	}
1286 
1287 	return 0;
1288 }
1289 
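/*
 * Copy 'src' to 'dst', ANDing the address bytes with 'netmask'.  The
 * sa_len and sa_family bytes are copied unmodified and anything
 * beyond the netmask is zeroed.
 */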
1290 static void
1291 rt_maskedcopy(
1292 	struct sockaddr *src,
1293 	struct sockaddr *dst,
1294 	struct sockaddr *netmask)
1295 {
1296 	u_char *cp1 = (u_char *)src;
1297 	u_char *cp2 = (u_char *)dst;
1298 	u_char *cp3 = (u_char *)netmask;
1299 	u_char *cplim = cp2 + *cp3;
1300 	u_char *cplim2 = cp2 + *cp1;
1301 
1302 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1303 	cp3 += 2;
1304 	if (cplim > cplim2)
1305 		cplim = cplim2;
1306 	while (cp2 < cplim)
1307 		*cp2++ = *cp1++ & *cp3++;
1308 	if (cp2 < cplim2)
1309 		bzero(cp2, cplim2 - cp2);
1310 }
1311 
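/*
 * Resolve the route actually used to reach a destination at the link
 * level, following rt_gwroute for gateway routes and re-looking it up
 * if it has gone down.  Returns 0 and sets *drt on success.
 */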
1312 int
1313 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1314 {
1315 	struct rtentry *up_rt, *rt;
1316 
1317 	if (!(rt0->rt_flags & RTF_UP)) {
1318 		up_rt = rtlookup(dst);
1319 		if (up_rt == NULL)
1320 			return (EHOSTUNREACH);
1321 		up_rt->rt_refcnt--;
1322 	} else
1323 		up_rt = rt0;
1324 	if (up_rt->rt_flags & RTF_GATEWAY) {
1325 		if (up_rt->rt_gwroute == NULL) {
1326 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1327 			if (up_rt->rt_gwroute == NULL)
1328 				return (EHOSTUNREACH);
1329 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1330 			rtfree(up_rt->rt_gwroute);
1331 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1332 			if (up_rt->rt_gwroute == NULL)
1333 				return (EHOSTUNREACH);
1334 		}
1335 		rt = up_rt->rt_gwroute;
1336 	} else
1337 		rt = up_rt;
1338 	if (rt->rt_flags & RTF_REJECT &&
1339 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1340 	     time_second < rt->rt_rmx.rmx_expire))	/* rt not expired */
1341 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1342 	*drt = rt;
1343 	return 0;
1344 }
1345 
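/*
 * Attach up to three MPLS shim sockaddrs (RTAX_MPLS1..RTAX_MPLS3) to
 * the rtentry, copying each one into freshly allocated storage.
 */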
1346 static int
1347 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
1348 	int i;
1349 
1350 	for (i = 0; i < 3; i++) {
1351 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1352 		int shimlen;
1353 
1354 		if (shim == NULL)
1355 			break;
1356 
1357 		shimlen = ROUNDUP(shim->sa_len);
1358 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1359 		bcopy(shim, rt->rt_shim[i], shimlen);
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 #ifdef ROUTE_DEBUG
1366 
1367 /*
1368  * Print out a route table entry
1369  */
1370 void
1371 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1372 {
1373 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1374 		rtinfo, mycpuid, rn, rn->rt_flags);
1375 	sockaddr_print(rt_key(rn));
1376 	kprintf(" mask ");
1377 	sockaddr_print(rt_mask(rn));
1378 	kprintf(" gw ");
1379 	sockaddr_print(rn->rt_gateway);
1380 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1381 	kprintf(" ifa %p\n", rn->rt_ifa);
1382 }
1383 
1384 void
1385 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1386 {
1387 	int didit = 0;
1388 	int i;
1389 
1390 #ifdef ROUTE_DEBUG
1391 	if (cmd == RTM_DELETE && route_debug > 1)
1392 		print_backtrace(-1);
1393 #endif
1394 
1395 	switch(cmd) {
1396 	case RTM_ADD:
1397 		kprintf("ADD ");
1398 		break;
1399 	case RTM_RESOLVE:
1400 		kprintf("RES ");
1401 		break;
1402 	case RTM_DELETE:
1403 		kprintf("DEL ");
1404 		break;
1405 	default:
1406 		kprintf("C%02d ", cmd);
1407 		break;
1408 	}
1409 	kprintf("rti %p cpu %d ", rti, mycpuid);
1410 	for (i = 0; i < rti->rti_addrs; ++i) {
1411 		if (rti->rti_info[i] == NULL)
1412 			continue;
1413 		if (didit)
1414 			kprintf(" ,");
1415 		switch(i) {
1416 		case RTAX_DST:
1417 			kprintf("(DST ");
1418 			break;
1419 		case RTAX_GATEWAY:
1420 			kprintf("(GWY ");
1421 			break;
1422 		case RTAX_NETMASK:
1423 			kprintf("(MSK ");
1424 			break;
1425 		case RTAX_GENMASK:
1426 			kprintf("(GEN ");
1427 			break;
1428 		case RTAX_IFP:
1429 			kprintf("(IFP ");
1430 			break;
1431 		case RTAX_IFA:
1432 			kprintf("(IFA ");
1433 			break;
1434 		case RTAX_AUTHOR:
1435 			kprintf("(AUT ");
1436 			break;
1437 		case RTAX_BRD:
1438 			kprintf("(BRD ");
1439 			break;
1440 		default:
1441 			kprintf("(?%02d ", i);
1442 			break;
1443 		}
1444 		sockaddr_print(rti->rti_info[i]);
1445 		kprintf(")");
1446 		didit = 1;
1447 	}
1448 	kprintf("\n");
1449 }
1450 
1451 void
1452 sockaddr_print(struct sockaddr *sa)
1453 {
1454 	struct sockaddr_in *sa4;
1455 	struct sockaddr_in6 *sa6;
1456 	int len;
1457 	int i;
1458 
1459 	if (sa == NULL) {
1460 		kprintf("NULL");
1461 		return;
1462 	}
1463 
1464 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1465 
1466 	switch(sa->sa_family) {
1467 	case AF_INET:
1468 	case AF_INET6:
1469 	default:
1470 		switch(sa->sa_family) {
1471 		case AF_INET:
1472 			sa4 = (struct sockaddr_in *)sa;
1473 			kprintf("INET %d %d.%d.%d.%d",
1474 				ntohs(sa4->sin_port),
1475 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1476 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1477 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1478 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1479 			);
1480 			break;
1481 		case AF_INET6:
1482 			sa6 = (struct sockaddr_in6 *)sa;
1483 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1484 				ntohs(sa6->sin6_port),
1485 				sa6->sin6_addr.s6_addr16[0],
1486 				sa6->sin6_addr.s6_addr16[1],
1487 				sa6->sin6_addr.s6_addr16[2],
1488 				sa6->sin6_addr.s6_addr16[3],
1489 				sa6->sin6_addr.s6_addr16[4],
1490 				sa6->sin6_addr.s6_addr16[5],
1491 				sa6->sin6_addr.s6_addr16[6],
1492 				sa6->sin6_addr.s6_addr16[7]
1493 			);
1494 			break;
1495 		default:
1496 			kprintf("AF%d ", sa->sa_family);
1497 			while (len > 0 && sa->sa_data[len-1] == 0)
1498 				--len;
1499 
1500 			for (i = 0; i < len; ++i) {
1501 				if (i)
1502 					kprintf(".");
1503 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1504 			}
1505 			break;
1506 		}
1507 	}
1508 }
1509 
1510 #endif
1511 
1512 /*
1513  * Set up a routing table entry, normally for an interface.
1514  */
1515 int
1516 rtinit(struct ifaddr *ifa, int cmd, int flags)
1517 {
1518 	struct sockaddr *dst, *deldst, *netmask;
1519 	struct mbuf *m = NULL;
1520 	struct radix_node_head *rnh;
1521 	struct radix_node *rn;
1522 	struct rt_addrinfo rtinfo;
1523 	int error;
1524 
1525 	if (flags & RTF_HOST) {
1526 		dst = ifa->ifa_dstaddr;
1527 		netmask = NULL;
1528 	} else {
1529 		dst = ifa->ifa_addr;
1530 		netmask = ifa->ifa_netmask;
1531 	}
1532 	/*
1533 	 * If it's a delete, check that if it exists, it's on the correct
1534 	 * interface or we might scrub a route to another ifa which would
1535 	 * be confusing at best and possibly worse.
1536 	 */
1537 	if (cmd == RTM_DELETE) {
1538 		/*
1539 		 * It's a delete, so it should already exist..
1540 		 * If it's a net, mask off the host bits
1541 		 * (Assuming we have a mask)
1542 		 */
1543 		if (netmask != NULL) {
1544 			m = m_get(MB_DONTWAIT, MT_SONAME);
1545 			if (m == NULL)
1546 				return (ENOBUFS);
1547 			mbuftrackid(m, 34);
1548 			deldst = mtod(m, struct sockaddr *);
1549 			rt_maskedcopy(dst, deldst, netmask);
1550 			dst = deldst;
1551 		}
1552 		/*
1553 		 * Look up an rtentry that is in the routing tree and
1554 		 * contains the correct info.
1555 		 */
1556 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1557 		    (rn = rnh->rnh_lookup((char *)dst,
1558 					  (char *)netmask, rnh)) == NULL ||
1559 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1560 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1561 			if (m != NULL)
1562 				m_free(m);
1563 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1564 		}
1565 		/* XXX */
1566 #if 0
1567 		else {
1568 			/*
1569 			 * One would think that as we are deleting, and we know
1570 			 * it doesn't exist, we could just return at this point
1571 			 * with an "ELSE" clause, but apparently not..
1572 			 */
1573 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1574 		}
1575 #endif
1576 	}
1577 	/*
1578 	 * Do the actual request
1579 	 */
1580 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1581 	rtinfo.rti_info[RTAX_DST] = dst;
1582 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1583 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1584 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1585 	rtinfo.rti_ifa = ifa;
1586 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
1587 	if (m != NULL)
1588 		m_free(m);
1589 	return (error);
1590 }
1591 
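/*
 * Callback for rtinit(): on cpu0 report the change via
 * rt_newaddrmsg(), and for RTM_DELETE free the rtentry once it is no
 * longer referenced.
 */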
1592 static void
1593 rtinit_rtrequest_callback(int cmd, int error,
1594 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1595 			  void *arg)
1596 {
1597 	struct ifaddr *ifa = arg;
1598 
1599 	if (error == 0 && rt) {
1600 		if (mycpuid == 0) {
1601 			++rt->rt_refcnt;
1602 			rt_newaddrmsg(cmd, ifa, error, rt);
1603 			--rt->rt_refcnt;
1604 		}
1605 		if (cmd == RTM_DELETE) {
1606 			if (rt->rt_refcnt == 0) {
1607 				++rt->rt_refcnt;
1608 				rtfree(rt);
1609 			}
1610 		}
1611 	}
1612 }
1613 
1614 struct netmsg_rts {
1615 	struct netmsg_base	base;
1616 	int			req;
1617 	struct rt_addrinfo	*rtinfo;
1618 	rtsearch_callback_func_t callback;
1619 	void			*arg;
1620 	boolean_t		exact_match;
1621 	int			found_cnt;
1622 };
1623 
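/*
 * Search every cpu's route table for the route described by rtinfo
 * and invoke the callback on each cpu where it is found.  With
 * exact_match a host lookup must return a host route (and a network
 * lookup a network route) for the route to count as found.
 */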
1624 int
1625 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1626 		rtsearch_callback_func_t callback, void *arg,
1627 		boolean_t exact_match)
1628 {
1629 	struct netmsg_rts msg;
1630 
1631 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1632 		    0, rtsearch_msghandler);
1633 	msg.req = req;
1634 	msg.rtinfo = rtinfo;
1635 	msg.callback = callback;
1636 	msg.arg = arg;
1637 	msg.exact_match = exact_match;
1638 	msg.found_cnt = 0;
1639 	return lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
1640 }
1641 
1642 static void
1643 rtsearch_msghandler(netmsg_t msg)
1644 {
1645 	struct netmsg_rts *rmsg = (void *)msg;
1646 	struct rt_addrinfo rtinfo;
1647 	struct radix_node_head *rnh;
1648 	struct rtentry *rt;
1649 	int nextcpu, error;
1650 
1651 	/*
1652 	 * Copy the rtinfo.  We need to make sure that the original
1653 	 * rtinfo, which is setup by the caller, in the netmsg will
1654 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1655 	 * path will see a different rtinfo than what this CPU has seen.
1656 	 */
1657 	rtinfo = *rmsg->rtinfo;
1658 
1659 	/*
1660 	 * Find the correct routing tree to use for this Address Family
1661 	 */
1662 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1663 		if (mycpuid != 0)
1664 			panic("partially initialized routing tables");
1665 		lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1666 		return;
1667 	}
1668 
1669 	/*
1670 	 * Correct rtinfo for the host route searching.
1671 	 */
1672 	if (rtinfo.rti_flags & RTF_HOST) {
1673 		rtinfo.rti_netmask = NULL;
1674 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1675 	}
1676 
1677 	rt = (struct rtentry *)
1678 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1679 			     (char *)rtinfo.rti_netmask, rnh);
1680 
1681 	/*
1682 	 * If we are asked to do the "exact match", we need to make sure
1683 	 * that host route searching got a host route while a network
1684 	 * route searching got a network route.
1685 	 */
1686 	if (rt != NULL && rmsg->exact_match &&
1687 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1688 		rt = NULL;
1689 
1690 	if (rt == NULL) {
1691 		/*
1692 		 * No matching routes have been found; don't count this
1693 		 * as a critical error (here we set 'error' to 0).  Just
1694 		 * keep moving on, since at least prcloned routes are not
1695 		 * duplicated onto each CPU.
1696 		 */
1697 		error = 0;
1698 	} else {
1699 		rmsg->found_cnt++;
1700 
1701 		rt->rt_refcnt++;
1702 		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1703 				      rmsg->found_cnt);
1704 		rt->rt_refcnt--;
1705 
1706 		if (error == EJUSTRETURN) {
1707 			lwkt_replymsg(&rmsg->base.lmsg, 0);
1708 			return;
1709 		}
1710 	}
1711 
1712 	nextcpu = mycpuid + 1;
1713 	if (error) {
1714 		KKASSERT(rmsg->found_cnt > 0);
1715 
1716 		/*
1717 		 * Under the following cases, an unrecoverable error
1718 		 * has not occurred:
1719 		 * o  Request is RTM_GET
1720 		 * o  The first time that we find the route, but the
1721 		 *    modification fails.
1722 		 */
1723 		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1724 			panic("rtsearch_msghandler: unrecoverable error "
1725 			      "cpu %d", mycpuid);
1726 		}
1727 		lwkt_replymsg(&rmsg->base.lmsg, error);
1728 	} else if (nextcpu < ncpus) {
1729 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
1730 	} else {
1731 		if (rmsg->found_cnt == 0) {
1732 			/* The requested route was never seen ... */
1733 			error = ESRCH;
1734 		}
1735 		lwkt_replymsg(&rmsg->base.lmsg, error);
1736 	}
1737 }
1738 
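/*
 * Enter the given netmask into the mask table of every cpu, starting
 * at cpu #0.
 */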
1739 int
1740 rtmask_add_global(struct sockaddr *mask)
1741 {
1742 	struct netmsg_base msg;
1743 
1744 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1745 		    0, rtmask_add_msghandler);
1746 	msg.lmsg.u.ms_resultp = mask;
1747 
1748 	return lwkt_domsg(rtable_portfn(0), &msg.lmsg, 0);
1749 }
1750 
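/*
 * Look up 'mask' in the current cpu's mask table, adding it first
 * when 'search' is FALSE.  Returns the canonical copy stored in the
 * tree, or NULL.
 */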
1751 struct sockaddr *
1752 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1753 {
1754 	struct radix_node *n;
1755 
1756 #define	clen(s)	(*(u_char *)(s))
1757 	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1758 	if (n != NULL &&
1759 	    mask->sa_len >= clen(n->rn_key) &&
1760 	    bcmp((char *)mask + 1,
1761 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1762 		return (struct sockaddr *)n->rn_key;
1763 	} else {
1764 		return NULL;
1765 	}
1766 #undef clen
1767 }
1768 
1769 static void
1770 rtmask_add_msghandler(netmsg_t msg)
1771 {
1772 	struct lwkt_msg *lmsg = &msg->lmsg;
1773 	struct sockaddr *mask = lmsg->u.ms_resultp;
1774 	int error = 0, nextcpu;
1775 
1776 	if (rtmask_lookup(mask) == NULL)
1777 		error = ENOBUFS;
1778 
1779 	nextcpu = mycpuid + 1;
1780 	if (!error && nextcpu < ncpus)
1781 		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
1782 	else
1783 		lwkt_replymsg(lmsg, error);
1784 }
1785 
1786 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1787 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1788