xref: /dragonfly/sys/net/route.c (revision 0e32b8c5)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)route.c	8.3 (Berkeley) 1/9/95
62  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
63  */
64 
65 #include "opt_inet.h"
66 #include "opt_mpls.h"
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/mbuf.h>
72 #include <sys/socket.h>
73 #include <sys/domain.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
78 
79 #include <net/if.h>
80 #include <net/if_var.h>
81 #include <net/route.h>
82 #include <net/netisr.h>
83 
84 #include <netinet/in.h>
85 #include <net/ip_mroute/ip_mroute.h>
86 
87 #include <sys/thread2.h>
88 #include <sys/msgport2.h>
89 #include <net/netmsg2.h>
90 #include <net/netisr2.h>
91 
92 #ifdef MPLS
93 #include <netproto/mpls/mpls.h>
94 #endif
95 
96 static struct rtstatistics rtstatistics_percpu[MAXCPU] __cachealign;
97 #define rtstat	rtstatistics_percpu[mycpuid]
98 
99 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
100 
101 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
102 			       struct sockaddr *);
103 static void rtable_init(void);
104 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
105 				      struct rtentry *, void *);
106 
107 static void rtredirect_msghandler(netmsg_t msg);
108 static void rtrequest1_msghandler(netmsg_t msg);
109 static void rtsearch_msghandler(netmsg_t msg);
110 static void rtmask_add_msghandler(netmsg_t msg);
111 
112 static int rt_setshims(struct rtentry *, struct sockaddr **);
113 
114 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
115 
116 #ifdef ROUTE_DEBUG
117 static int route_debug = 1;
118 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
119 	   &route_debug, 0, "");
120 #endif
121 
122 u_long route_kmalloc_limit = 0;
123 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
124 
125 /*
126  * Initialize the route table(s) for the protocol domains.  The
127  * per-cpu tables are attached by dispatching a netisr message that
128  * calls dom_rtattach() on each cpu.
129  */
130 void
131 route_init(void)
132 {
133 	int cpu;
134 
135 	if (route_kmalloc_limit)
136 		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
137 
138 	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
139 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
140 	rn_init();      /* initialize all zeroes, all ones, mask table */
141 	rtable_init();	/* call dom_rtattach() on each cpu */
142 }
143 
144 static void
145 rtable_init_oncpu(netmsg_t msg)
146 {
147 	struct domain *dom;
148 	int cpu = mycpuid;
149 
150 	ASSERT_NETISR_NCPUS(cpu);
151 
152 	SLIST_FOREACH(dom, &domains, dom_next) {
153 		if (dom->dom_rtattach) {
154 			dom->dom_rtattach(
155 				(void **)&rt_tables[cpu][dom->dom_family],
156 				dom->dom_rtoffset);
157 		}
158 	}
159 	netisr_forwardmsg(&msg->base, cpu + 1);
160 }
161 
162 static void
163 rtable_init(void)
164 {
165 	struct netmsg_base msg;
166 
167 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
168 	netisr_domsg_global(&msg);
169 }
170 
171 /*
172  * Routing statistics.
173  */
174 static int
175 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
176 {
177 	int cpu, error = 0;
178 
179 	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
180 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
181 					sizeof(struct rtstatistics))))
182 				break;
183 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
184 					sizeof(struct rtstatistics))))
185 				break;
186 	}
187 
188 	return (error);
189 }
190 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
191 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
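
/*
 * Illustrative sketch (not part of the original file): the handler above
 * exports one struct rtstatistics per netisr cpu, back to back, so a
 * hypothetical in-kernel consumer aggregates the per-cpu counters itself,
 * e.g.:
 *
 *	u_long unreach = 0;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
 *		unreach += rtstatistics_percpu[cpu].rts_unreach;
 */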
192 
193 /*
194  * Packet routing routines.
195  */
196 
197 /*
198  * Look up and fill in the "ro_rt" rtentry field in a route structure given
199  * an address in the "ro_dst" field.  Always send a report on a miss and
200  * always clone routes.
201  */
202 void
203 rtalloc(struct route *ro)
204 {
205 	rtalloc_ign(ro, 0UL);
206 }
207 
208 /*
209  * Look up and fill in the "ro_rt" rtentry field in a route structure given
210  * an address in the "ro_dst" field.  Always send a report on a miss and
211  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
212  * ignored.
213  */
214 void
215 rtalloc_ign(struct route *ro, u_long ignoreflags)
216 {
217 	if (ro->ro_rt != NULL) {
218 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
219 			return;
220 		rtfree(ro->ro_rt);
221 		ro->ro_rt = NULL;
222 	}
223 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
224 }
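
/*
 * Usage sketch (not part of the original file; the AF_INET setup below is
 * purely hypothetical): a caller embeds a struct route, points ro_dst at
 * the destination, lets rtalloc_ign() fill in ro_rt, and drops the
 * reference with RTFREE() when done.
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	rtalloc_ign(&ro, RTF_PRCLONING);	(don't trigger protocol cloning)
 *	if (ro.ro_rt != NULL) {
 *		(use ro.ro_rt->rt_ifp, ro.ro_rt->rt_gateway, ...)
 *		RTFREE(ro.ro_rt);
 *	}
 */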
225 
226 /*
227  * Look up the route that matches the given "dst" address.
228  *
229  * Route lookup can have the side-effect of creating and returning
230  * a cloned route instead when "dst" matches a cloning route and the
231  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
232  *
233  * Any route returned has its reference count incremented.
234  */
235 struct rtentry *
236 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
237 {
238 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
239 	struct rtentry *rt;
240 
241 	ASSERT_NETISR_NCPUS(mycpuid);
242 
243 	if (rnh == NULL)
244 		goto unreach;
245 
246 	/*
247 	 * Look up route in the radix tree.
248 	 */
249 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
250 	if (rt == NULL)
251 		goto unreach;
252 
253 	/*
254 	 * Handle cloning routes.
255 	 */
256 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
257 		struct rtentry *clonedroute;
258 		int error;
259 
260 		clonedroute = rt;	/* copy in/copy out parameter */
261 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
262 				  &clonedroute);	/* clone the route */
263 		if (error != 0) {	/* cloning failed */
264 			if (generate_report)
265 				rt_dstmsg(RTM_MISS, dst, error);
266 			rt->rt_refcnt++;
267 			return (rt);	/* return the uncloned route */
268 		}
269 		if (generate_report) {
270 			if (clonedroute->rt_flags & RTF_XRESOLVE)
271 				rt_dstmsg(RTM_RESOLVE, dst, 0);
272 			else
273 				rt_rtmsg(RTM_ADD, clonedroute,
274 					 clonedroute->rt_ifp, 0);
275 		}
276 		return (clonedroute);	/* return cloned route */
277 	}
278 
279 	/*
280 	 * Increment the reference count of the matched route and return.
281 	 */
282 	rt->rt_refcnt++;
283 	return (rt);
284 
285 unreach:
286 	rtstat.rts_unreach++;
287 	if (generate_report)
288 		rt_dstmsg(RTM_MISS, dst, 0);
289 	return (NULL);
290 }
291 
292 void
293 rtfree(struct rtentry *rt)
294 {
295 
296 	ASSERT_NETISR_NCPUS(rt->rt_cpuid);
297 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
298 
299 	--rt->rt_refcnt;
300 	if (rt->rt_refcnt == 0) {
301 		struct radix_node_head *rnh =
302 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
303 
304 		if (rnh->rnh_close)
305 			rnh->rnh_close((struct radix_node *)rt, rnh);
306 		if (!(rt->rt_flags & RTF_UP)) {
307 			/* deallocate route */
308 			if (rt->rt_ifa != NULL)
309 				IFAFREE(rt->rt_ifa);
310 			if (rt->rt_parent != NULL)
311 				RTFREE(rt->rt_parent);	/* recursive call! */
312 			Free(rt_key(rt));
313 			Free(rt);
314 		}
315 	}
316 }
317 
318 static void
319 rtfree_async_dispatch(netmsg_t msg)
320 {
321 	struct rtentry *rt = msg->lmsg.u.ms_resultp;
322 
323 	rtfree(rt);
324 	netisr_replymsg(&msg->base, 0);
325 }
326 
327 void
328 rtfree_async(struct rtentry *rt)
329 {
330 	struct netmsg_base *msg;
331 
332 	if (IN_NETISR_NCPUS(rt->rt_cpuid)) {
333 		rtfree(rt);
334 		return;
335 	}
336 
337 	KASSERT(rt->rt_refcnt > 0,
338 	    ("rtfree_async: rt_refcnt %ld", rt->rt_refcnt));
339 
340 	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
341 	netmsg_init(msg, NULL, &netisr_afree_rport, 0, rtfree_async_dispatch);
342 	msg->lmsg.u.ms_resultp = rt;
343 
344 	netisr_sendmsg(msg, rt->rt_cpuid);
345 }
346 
347 int
348 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
349 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
350 {
351 	struct rtentry *rt = NULL;
352 	struct rt_addrinfo rtinfo;
353 	struct ifaddr *ifa;
354 	u_long *stat = NULL;
355 	int error;
356 
357 	ASSERT_NETISR_NCPUS(mycpuid);
358 
359 	/* verify the gateway is directly reachable */
360 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
361 		error = ENETUNREACH;
362 		goto out;
363 	}
364 
365 	/*
366 	 * If the redirect isn't from our current router for this destination,
367 	 * it's either old or wrong.
368 	 */
369 	if (!(flags & RTF_DONE) &&		/* XXX JH */
370 	    (rt = rtpurelookup(dst)) != NULL &&
371 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
372 		error = EINVAL;
373 		goto done;
374 	}
375 
376 	/*
377 	 * If it redirects us to ourselves, we have a routing loop,
378 	 * perhaps as a result of an interface going down recently.
379 	 */
380 	if (ifa_ifwithaddr(gateway)) {
381 		error = EHOSTUNREACH;
382 		goto done;
383 	}
384 
385 	/*
386 	 * Create a new entry if the lookup failed or if we got back
387 	 * a wildcard entry for the default route.  This is necessary
388 	 * for hosts which use routing redirects generated by smart
389 	 * gateways to dynamically build the routing tables.
390 	 */
391 	if (rt == NULL)
392 		goto create;
393 	if ((rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) {
394 		rtfree(rt);
395 		goto create;
396 	}
397 
398 	/* Ignore redirects for directly connected hosts. */
399 	if (!(rt->rt_flags & RTF_GATEWAY)) {
400 		error = EHOSTUNREACH;
401 		goto done;
402 	}
403 
404 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
405 		/*
406 		 * Changing from a network route to a host route.
407 		 * Create a new host route rather than smashing the
408 		 * network route.
409 		 */
410 create:
411 		flags |=  RTF_GATEWAY | RTF_DYNAMIC;
412 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
413 		rtinfo.rti_info[RTAX_DST] = dst;
414 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
415 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
416 		rtinfo.rti_flags = flags;
417 		rtinfo.rti_ifa = ifa;
418 		rt = NULL;	/* copy-in/copy-out parameter */
419 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
420 		if (rt != NULL)
421 			flags = rt->rt_flags;
422 		stat = &rtstat.rts_dynamic;
423 	} else {
424 		/*
425 		 * Smash the current notion of the gateway to this destination.
426 		 * Should check about netmask!!!
427 		 */
428 		rt->rt_flags |= RTF_MODIFIED;
429 		flags |= RTF_MODIFIED;
430 
431 		/* We only need to report rtmsg on CPU0 */
432 		rt_setgate(rt, rt_key(rt), gateway,
433 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
434 		error = 0;
435 		stat = &rtstat.rts_newgateway;
436 	}
437 
438 done:
439 	if (rt != NULL)
440 		rtfree(rt);
441 out:
442 	if (error != 0)
443 		rtstat.rts_badredirect++;
444 	else if (stat != NULL)
445 		(*stat)++;
446 
447 	return error;
448 }
449 
450 struct netmsg_rtredirect {
451 	struct netmsg_base base;
452 	struct sockaddr *dst;
453 	struct sockaddr *gateway;
454 	struct sockaddr *netmask;
455 	int		flags;
456 	struct sockaddr *src;
457 };
458 
459 /*
460  * Force a routing table entry to the specified
461  * destination to go through the given gateway.
462  * Normally called as a result of a routing redirect
463  * message from the network layer.
464  */
465 void
466 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
467 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
468 {
469 	struct rt_addrinfo rtinfo;
470 	int error;
471 	struct netmsg_rtredirect msg;
472 
473 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
474 		    0, rtredirect_msghandler);
475 	msg.dst = dst;
476 	msg.gateway = gateway;
477 	msg.netmask = netmask;
478 	msg.flags = flags;
479 	msg.src = src;
480 	error = netisr_domsg_global(&msg.base);
481 
482 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
483 	rtinfo.rti_info[RTAX_DST] = dst;
484 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
485 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
486 	rtinfo.rti_info[RTAX_AUTHOR] = src;
487 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
488 }
489 
490 static void
491 rtredirect_msghandler(netmsg_t msg)
492 {
493 	struct netmsg_rtredirect *rmsg = (void *)msg;
494 
495 	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
496 			 rmsg->flags, rmsg->src);
497 	netisr_forwardmsg(&msg->base, mycpuid + 1);
498 }
499 
500 /*
501  * Routing table ioctl interface.
502  */
503 int
504 rtioctl(u_long req, caddr_t data, struct ucred *cred)
505 {
506 #ifdef INET
507 	/* Multicast goop, grrr... */
508 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
509 #else
510 	return ENXIO;
511 #endif
512 }
513 
514 struct ifaddr *
515 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
516 {
517 	struct ifaddr *ifa;
518 
519 	if (!(flags & RTF_GATEWAY)) {
520 		/*
521 		 * If we are adding a route to an interface,
522 		 * and the interface is a point-to-point link,
523 		 * we should search for the destination
524 		 * as our clue to the interface.  Otherwise
525 		 * we can use the local address.
526 		 */
527 		ifa = NULL;
528 		if (flags & RTF_HOST) {
529 			ifa = ifa_ifwithdstaddr(dst);
530 		}
531 		if (ifa == NULL)
532 			ifa = ifa_ifwithaddr(gateway);
533 	} else {
534 		/*
535 		 * If we are adding a route to a remote net
536 		 * or host, the gateway may still be on the
537 		 * other end of a pt to pt link.
538 		 */
539 		ifa = ifa_ifwithdstaddr(gateway);
540 	}
541 	if (ifa == NULL)
542 		ifa = ifa_ifwithnet(gateway);
543 	if (ifa == NULL) {
544 		struct rtentry *rt;
545 
546 		rt = rtpurelookup(gateway);
547 		if (rt == NULL)
548 			return (NULL);
549 		rt->rt_refcnt--;
550 		if ((ifa = rt->rt_ifa) == NULL)
551 			return (NULL);
552 	}
553 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
554 		struct ifaddr *oldifa = ifa;
555 
556 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
557 		if (ifa == NULL)
558 			ifa = oldifa;
559 	}
560 	return (ifa);
561 }
562 
563 static int rt_fixdelete (struct radix_node *, void *);
564 static int rt_fixchange (struct radix_node *, void *);
565 
566 struct rtfc_arg {
567 	struct rtentry *rt0;
568 	struct radix_node_head *rnh;
569 };
570 
571 /*
572  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
573  */
574 int
575 rt_getifa(struct rt_addrinfo *rtinfo)
576 {
577 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
578 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
579 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
580 	int flags = rtinfo->rti_flags;
581 
582 	/*
583 	 * ifp may be specified by sockaddr_dl
584 	 * when protocol address is ambiguous.
585 	 */
586 	if (rtinfo->rti_ifp == NULL) {
587 		struct sockaddr *ifpaddr;
588 
589 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
590 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
591 			struct ifaddr *ifa;
592 
593 			ifa = ifa_ifwithnet(ifpaddr);
594 			if (ifa != NULL)
595 				rtinfo->rti_ifp = ifa->ifa_ifp;
596 		}
597 	}
598 
599 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
600 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
601 	if (rtinfo->rti_ifa == NULL) {
602 		struct sockaddr *sa;
603 
604 		sa = ifaaddr != NULL ? ifaaddr :
605 		    (gateway != NULL ? gateway : dst);
606 		if (sa != NULL && rtinfo->rti_ifp != NULL)
607 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
608 		else if (dst != NULL && gateway != NULL)
609 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
610 		else if (sa != NULL)
611 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
612 	}
613 	if (rtinfo->rti_ifa == NULL)
614 		return (ENETUNREACH);
615 
616 	if (rtinfo->rti_ifp == NULL)
617 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
618 	return (0);
619 }
620 
621 /*
622  * Do appropriate manipulations of a routing tree given
623  * all the bits of info needed
624  */
625 int
626 rtrequest(
627 	int req,
628 	struct sockaddr *dst,
629 	struct sockaddr *gateway,
630 	struct sockaddr *netmask,
631 	int flags,
632 	struct rtentry **ret_nrt)
633 {
634 	struct rt_addrinfo rtinfo;
635 
636 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
637 	rtinfo.rti_info[RTAX_DST] = dst;
638 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
639 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
640 	rtinfo.rti_flags = flags;
641 	return rtrequest1(req, &rtinfo, ret_nrt);
642 }
643 
644 int
645 rtrequest_global(
646 	int req,
647 	struct sockaddr *dst,
648 	struct sockaddr *gateway,
649 	struct sockaddr *netmask,
650 	int flags)
651 {
652 	struct rt_addrinfo rtinfo;
653 
654 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
655 	rtinfo.rti_info[RTAX_DST] = dst;
656 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
657 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
658 	rtinfo.rti_flags = flags;
659 	return rtrequest1_global(req, &rtinfo, NULL, NULL, RTREQ_PRIO_NORM);
660 }
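
/*
 * Hypothetical sketch (not in the original source): installing a static
 * host route on every cpu's table through rtrequest_global().  The
 * addresses are placeholders to be filled in by the caller.
 *
 *	struct sockaddr_in dst, gw;
 *	int error;
 *
 *	bzero(&dst, sizeof(dst));
 *	dst.sin_len = sizeof(dst);
 *	dst.sin_family = AF_INET;
 *	dst.sin_addr.s_addr = htonl( <destination host> );
 *	gw = dst;
 *	gw.sin_addr.s_addr = htonl( <next-hop gateway> );
 *	error = rtrequest_global(RTM_ADD, (struct sockaddr *)&dst,
 *				 (struct sockaddr *)&gw, NULL,
 *				 RTF_HOST | RTF_GATEWAY | RTF_STATIC);
 */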
661 
662 struct netmsg_rtq {
663 	struct netmsg_base	base;
664 	int			req;
665 	struct rt_addrinfo	*rtinfo;
666 	rtrequest1_callback_func_t callback;
667 	void			*arg;
668 };
669 
670 int
671 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
672     rtrequest1_callback_func_t callback, void *arg, boolean_t req_prio)
673 {
674 	struct netmsg_rtq msg;
675 	int flags = 0;
676 
677 	if (req_prio)
678 		flags = MSGF_PRIORITY;
679 	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
680 	    rtrequest1_msghandler);
681 	msg.base.lmsg.ms_error = -1;
682 	msg.req = req;
683 	msg.rtinfo = rtinfo;
684 	msg.callback = callback;
685 	msg.arg = arg;
686 	return (netisr_domsg_global(&msg.base));
687 }
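
/*
 * Callback sketch (not part of the original file): a callback handed to
 * rtrequest1_global() runs on every cpu with that cpu's resulting rtentry
 * and must match rtrequest1_callback_func_t.  The hypothetical example
 * below merely tags successfully added routes.
 *
 *	static void
 *	example_rtrequest_callback(int req, int error,
 *	    struct rt_addrinfo *rtinfo, struct rtentry *rt, void *arg)
 *	{
 *		if (error == 0 && rt != NULL && req == RTM_ADD)
 *			rt->rt_flags |= RTF_PROTO1;
 *	}
 */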
688 
689 /*
690  * Handle a route table request on the current cpu.  Since the route tables
691  * are supposed to be identical on each cpu, an error occurring later in the
692  * message chain is considered system-fatal.
693  */
694 static void
695 rtrequest1_msghandler(netmsg_t msg)
696 {
697 	struct netmsg_rtq *rmsg = (void *)msg;
698 	struct rt_addrinfo rtinfo;
699 	struct rtentry *rt = NULL;
700 	int error;
701 
702 	/*
703 	 * Copy the rtinfo.  We need to make sure that the original
704 	 * rtinfo, which is setup by the caller, in the netmsg will
705 	 * _not_ be changed; else the next CPU on the netmsg forwarding
706 	 * path will see a different rtinfo than what this CPU has seen.
707 	 */
708 	rtinfo = *rmsg->rtinfo;
709 
710 	error = rtrequest1(rmsg->req, &rtinfo, &rt);
711 	if (rt)
712 		--rt->rt_refcnt;
713 	if (rmsg->callback)
714 		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
715 
716 	/*
717 	 * RTM_DELETE's are propagated even if an error occurs, since a
718 	 * cloned route might be undergoing deletion and cloned routes
719 	 * are not necessarily replicated.  An overall error is returned
720 	 * only if no cpus have the route in question.
721 	 */
722 	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
723 		rmsg->base.lmsg.ms_error = error;
724 
725 	if (error && rmsg->req != RTM_DELETE) {
726 		if (mycpuid != 0) {
727 			panic("rtrequest1_msghandler: rtrequest table req %d, "
728 			    "failed on cpu%d, error %d\n",
729 			    rmsg->req, mycpuid, error);
730 		}
731 		netisr_replymsg(&rmsg->base, error);
732 	} else {
733 		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1,
734 		    rmsg->base.lmsg.ms_error);
735 	}
736 }
737 
738 int
739 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
740 {
741 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
742 	struct rtentry *rt;
743 	struct radix_node *rn;
744 	struct radix_node_head *rnh;
745 	struct ifaddr *ifa;
746 	struct sockaddr *ndst;
747 	boolean_t reportmsg;
748 	int error = 0;
749 
750 	ASSERT_NETISR_NCPUS(mycpuid);
751 
752 #define gotoerr(x) { error = x ; goto bad; }
753 
754 #ifdef ROUTE_DEBUG
755 	if (route_debug)
756 		rt_addrinfo_print(req, rtinfo);
757 #endif
758 
759 	crit_enter();
760 	/*
761 	 * Find the correct routing tree to use for this Address Family
762 	 */
763 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
764 		gotoerr(EAFNOSUPPORT);
765 
766 	/*
767 	 * If we are adding a host route then we don't want to put
768 	 * a netmask in the tree, nor do we want to clone it.
769 	 */
770 	if (rtinfo->rti_flags & RTF_HOST) {
771 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
772 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
773 	}
774 
775 	switch (req) {
776 	case RTM_DELETE:
777 		/* Remove the item from the tree. */
778 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
779 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
780 				      rnh);
781 		if (rn == NULL)
782 			gotoerr(ESRCH);
783 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
784 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
785 		rt = (struct rtentry *)rn;
786 
787 		/* ref to prevent a deletion race */
788 		++rt->rt_refcnt;
789 
790 		/* Free any routes cloned from this one. */
791 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
792 		    rt_mask(rt) != NULL) {
793 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
794 					       (char *)rt_mask(rt),
795 					       rt_fixdelete, rt);
796 		}
797 
798 		if (rt->rt_gwroute != NULL) {
799 			RTFREE(rt->rt_gwroute);
800 			rt->rt_gwroute = NULL;
801 		}
802 
803 		/*
804 		 * NB: RTF_UP must be set during the search above,
805 		 * because we might delete the last ref, causing
806 		 * rt to get freed prematurely.
807 		 */
808 		rt->rt_flags &= ~RTF_UP;
809 
810 #ifdef ROUTE_DEBUG
811 		if (route_debug)
812 			rt_print(rtinfo, rt);
813 #endif
814 
815 		/* Give the protocol a chance to keep things in sync. */
816 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
817 			ifa->ifa_rtrequest(RTM_DELETE, rt);
818 
819 		/*
820 		 * If the caller wants it, then it can have it,
821 		 * but it's up to it to free the rtentry as we won't be
822 		 * doing it.
823 		 */
824 		KASSERT(rt->rt_refcnt >= 0,
825 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
826 		if (ret_nrt != NULL) {
827 			/* leave ref intact for return */
828 			*ret_nrt = rt;
829 		} else {
830 			/* deref / attempt to destroy */
831 			rtfree(rt);
832 		}
833 		break;
834 
835 	case RTM_RESOLVE:
836 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
837 			gotoerr(EINVAL);
838 
839 		KASSERT(rt->rt_cpuid == mycpuid,
840 		    ("rt resolve rt_cpuid %d, mycpuid %d",
841 		     rt->rt_cpuid, mycpuid));
842 
843 		ifa = rt->rt_ifa;
844 		rtinfo->rti_flags =
845 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
846 		rtinfo->rti_flags |= RTF_WASCLONED;
847 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
848 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
849 			rtinfo->rti_flags |= RTF_HOST;
850 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
851 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
852 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
853 		goto makeroute;
854 
855 	case RTM_ADD:
856 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
857 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
858 		    ("rtrequest: GATEWAY but no gateway"));
859 
860 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
861 			gotoerr(error);
862 		ifa = rtinfo->rti_ifa;
863 makeroute:
864 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
865 		if (rt == NULL) {
866 			if (req == RTM_ADD) {
867 				kprintf("rtrequest1: alloc rtentry failed on "
868 				    "cpu%d\n", mycpuid);
869 			}
870 			gotoerr(ENOBUFS);
871 		}
872 		bzero(rt, sizeof(struct rtentry));
873 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
874 		rt->rt_cpuid = mycpuid;
875 
876 		if (mycpuid != 0 && req == RTM_ADD) {
877 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
878 			reportmsg = RTL_DONTREPORT;
879 		} else {
880 			/*
881 			 * For RTM_ADD, we only send rtmsg on CPU0.
882 			 * For RTM_RESOLVE, we always send rtmsg. XXX
883 			 */
884 			reportmsg = RTL_REPORTMSG;
885 		}
886 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
887 				   reportmsg);
888 		if (error != 0) {
889 			Free(rt);
890 			gotoerr(error);
891 		}
892 
893 		ndst = rt_key(rt);
894 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
895 			rt_maskedcopy(dst, ndst,
896 				      rtinfo->rti_info[RTAX_NETMASK]);
897 		else
898 			bcopy(dst, ndst, dst->sa_len);
899 
900 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
901 			rt_setshims(rt, rtinfo->rti_info);
902 
903 		/*
904 		 * Note that we now have a reference to the ifa.
905 		 * This was moved from below so that rnh->rnh_addaddr() can
906 		 * examine the ifa and ifa->ifa_ifp if it so desires.
907 		 */
908 		IFAREF(ifa);
909 		rt->rt_ifa = ifa;
910 		rt->rt_ifp = ifa->ifa_ifp;
911 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
912 
913 		rn = rnh->rnh_addaddr((char *)ndst,
914 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
915 				      rnh, rt->rt_nodes);
916 		if (rn == NULL) {
917 			struct rtentry *oldrt;
918 
919 			/*
920 			 * We already have one of these in the tree.
921 			 * We do a special hack: if the old route was
922 			 * cloned, then we blow it away and try
923 			 * re-inserting the new one.
924 			 */
925 			oldrt = rtpurelookup(ndst);
926 			if (oldrt != NULL) {
927 				--oldrt->rt_refcnt;
928 				if (oldrt->rt_flags & RTF_WASCLONED) {
929 					rtrequest(RTM_DELETE, rt_key(oldrt),
930 						  oldrt->rt_gateway,
931 						  rt_mask(oldrt),
932 						  oldrt->rt_flags, NULL);
933 					rn = rnh->rnh_addaddr((char *)ndst,
934 					    (char *)
935 						rtinfo->rti_info[RTAX_NETMASK],
936 					    rnh, rt->rt_nodes);
937 				}
938 			}
939 		}
940 		/* NOTE: rt_ifa may have been changed */
941 		ifa = rt->rt_ifa;
942 
943 		/*
944 		 * If it still failed to go into the tree,
945 		 * then un-make it (this should be a function).
946 		 */
947 		if (rn == NULL) {
948 			if (rt->rt_gwroute != NULL)
949 				rtfree(rt->rt_gwroute);
950 			IFAFREE(ifa);
951 			Free(rt_key(rt));
952 			Free(rt);
953 			gotoerr(EEXIST);
954 		}
955 
956 		/*
957 		 * If we got here from RESOLVE, then we are cloning
958 		 * so clone the rest, and note that we
959 		 * are a clone (and increment the parent's references)
960 		 */
961 		if (req == RTM_RESOLVE) {
962 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
963 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
964 			if ((*ret_nrt)->rt_flags &
965 				       (RTF_CLONING | RTF_PRCLONING)) {
966 				rt->rt_parent = *ret_nrt;
967 				(*ret_nrt)->rt_refcnt++;
968 			}
969 		}
970 
971 		/*
972 		 * if this protocol has something to add to this then
973 		 * allow it to do that as well.
974 		 */
975 		if (ifa->ifa_rtrequest != NULL)
976 			ifa->ifa_rtrequest(req, rt);
977 
978 		/*
979 		 * We repeat the same procedure from rt_setgate() here because
980 		 * We repeat the same procedure from rt_setgate() here because
981 		 * it has no effect there: at that point the node has not yet
982 		 * been added to the tree.
983 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
984 		    rt_mask(rt) != NULL) {
985 			struct rtfc_arg arg = { rt, rnh };
986 
987 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
988 					       (char *)rt_mask(rt),
989 					       rt_fixchange, &arg);
990 		}
991 
992 #ifdef ROUTE_DEBUG
993 		if (route_debug)
994 			rt_print(rtinfo, rt);
995 #endif
996 		/*
997 		 * Return the resulting rtentry,
998 		 * increasing the number of references by one.
999 		 */
1000 		if (ret_nrt != NULL) {
1001 			rt->rt_refcnt++;
1002 			*ret_nrt = rt;
1003 		}
1004 		break;
1005 	case RTM_GET:
1006 		/* Get the item from the tree. */
1007 		rn = rnh->rnh_lookup((char *)rtinfo->rti_info[RTAX_DST],
1008 				     (char *)rtinfo->rti_info[RTAX_NETMASK],
1009 				      rnh);
1010 		if (rn == NULL)
1011 			gotoerr(ESRCH);
1012 		if (ret_nrt != NULL) {
1013 			rt = (struct rtentry *)rn;
1014 			rt->rt_refcnt++;
1015 			*ret_nrt = rt;
1016 		}
1017 		break;
1018 	default:
1019 		error = EOPNOTSUPP;
1020 	}
1021 bad:
1022 #ifdef ROUTE_DEBUG
1023 	if (route_debug) {
1024 		if (error)
1025 			kprintf("rti %p failed error %d\n", rtinfo, error);
1026 		else
1027 			kprintf("rti %p succeeded\n", rtinfo);
1028 	}
1029 #endif
1030 	crit_exit();
1031 	return (error);
1032 }
1033 
1034 /*
1035  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1036  * (i.e., the routes related to it by the operation of cloning).  This
1037  * routine is iterated over all potential former-child-routes by way of
1038  * rnh->rnh_walktree_from() above, and those that actually are children of
1039  * the late parent (passed in as VP here) are themselves deleted.
1040  */
1041 static int
1042 rt_fixdelete(struct radix_node *rn, void *vp)
1043 {
1044 	struct rtentry *rt = (struct rtentry *)rn;
1045 	struct rtentry *rt0 = vp;
1046 
1047 	if (rt->rt_parent == rt0 &&
1048 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1049 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1050 				 rt->rt_flags, NULL);
1051 	}
1052 	return 0;
1053 }
1054 
1055 /*
1056  * This routine is called from rt_setgate() to do the analogous thing for
1057  * adds and changes.  There is the added complication in this case of a
1058  * middle insert; i.e., insertion of a new network route between an older
1059  * network route and (cloned) host routes.  For this reason, a simple check
1060  * of rt->rt_parent is insufficient; each candidate route must be tested
1061  * against the (mask, value) of the new route (passed as before in vp)
1062  * to see if the new route matches it.
1063  *
1064  * XXX - it may be possible to do fixdelete() for changes and reserve this
1065  * routine just for adds.  I'm not sure why I thought it was necessary to do
1066  * changes this way.
1067  */
1068 #ifdef DEBUG
1069 static int rtfcdebug = 0;
1070 #endif
1071 
1072 static int
1073 rt_fixchange(struct radix_node *rn, void *vp)
1074 {
1075 	struct rtentry *rt = (struct rtentry *)rn;
1076 	struct rtfc_arg *ap = vp;
1077 	struct rtentry *rt0 = ap->rt0;
1078 	struct radix_node_head *rnh = ap->rnh;
1079 	u_char *xk1, *xm1, *xk2, *xmp;
1080 	int i, len, mlen;
1081 
1082 #ifdef DEBUG
1083 	if (rtfcdebug)
1084 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1085 #endif
1086 
1087 	if (rt->rt_parent == NULL ||
1088 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1089 #ifdef DEBUG
1090 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1091 #endif
1092 		return 0;
1093 	}
1094 
1095 	if (rt->rt_parent == rt0) {
1096 #ifdef DEBUG
1097 		if (rtfcdebug) kprintf("parent match\n");
1098 #endif
1099 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1100 				 rt->rt_flags, NULL);
1101 	}
1102 
1103 	/*
1104 	 * There probably is a function somewhere which does this...
1105 	 * if not, there should be.
1106 	 */
1107 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1108 
1109 	xk1 = (u_char *)rt_key(rt0);
1110 	xm1 = (u_char *)rt_mask(rt0);
1111 	xk2 = (u_char *)rt_key(rt);
1112 
1113 	/* avoid applying a less specific route */
1114 	xmp = (u_char *)rt_mask(rt->rt_parent);
1115 	mlen = rt_key(rt->rt_parent)->sa_len;
1116 	if (mlen > rt_key(rt0)->sa_len) {
1117 #ifdef DEBUG
1118 		if (rtfcdebug)
1119 			kprintf("rt_fixchange: inserting a less "
1120 			       "specific route\n");
1121 #endif
1122 		return 0;
1123 	}
1124 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1125 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1126 #ifdef DEBUG
1127 			if (rtfcdebug)
1128 				kprintf("rt_fixchange: inserting a less "
1129 				       "specific route\n");
1130 #endif
1131 			return 0;
1132 		}
1133 	}
1134 
1135 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1136 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1137 #ifdef DEBUG
1138 			if (rtfcdebug) kprintf("no match\n");
1139 #endif
1140 			return 0;
1141 		}
1142 	}
1143 
1144 	/*
1145 	 * OK, this node is a clone, and matches the node currently being
1146 	 * changed/added under the node's mask.  So, get rid of it.
1147 	 */
1148 #ifdef DEBUG
1149 	if (rtfcdebug) kprintf("deleting\n");
1150 #endif
1151 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1152 			 rt->rt_flags, NULL);
1153 }
1154 
1155 int
1156 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1157 	   boolean_t generate_report)
1158 {
1159 	char *space, *oldspace;
1160 	int dlen = RT_ROUNDUP(dst->sa_len), glen = RT_ROUNDUP(gate->sa_len);
1161 	struct rtentry *rt = rt0;
1162 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1163 
1164 	ASSERT_NETISR_NCPUS(mycpuid);
1165 
1166 	/*
1167 	 * A host route with the destination equal to the gateway
1168 	 * will interfere with keeping LLINFO in the routing
1169 	 * table, so disallow it.
1170 	 */
1171 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1172 			      (RTF_HOST | RTF_GATEWAY)) &&
1173 	    dst->sa_len == gate->sa_len &&
1174 	    sa_equal(dst, gate)) {
1175 		/*
1176 		 * The route might already exist if this is an RTM_CHANGE
1177 		 * or a routing redirect, so try to delete it.
1178 		 */
1179 		if (rt_key(rt0) != NULL)
1180 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1181 				  rt_mask(rt0), rt0->rt_flags, NULL);
1182 		return EADDRNOTAVAIL;
1183 	}
1184 
1185 	/*
1186 	 * Both dst and gateway are stored in the same malloc'ed chunk
1187 	 * (If I ever get my hands on....)
1188 	 * If we need to malloc a new chunk, keep the old one around
1189 	 * until we no longer need it.
1190 	 */
1191 	if (rt->rt_gateway == NULL ||
1192 	    glen > RT_ROUNDUP(rt->rt_gateway->sa_len)) {
1193 		oldspace = (char *)rt_key(rt);
1194 		R_Malloc(space, char *, dlen + glen);
1195 		if (space == NULL)
1196 			return ENOBUFS;
1197 		rt->rt_nodes->rn_key = space;
1198 	} else {
1199 		space = (char *)rt_key(rt);	/* Just use the old space. */
1200 		oldspace = NULL;
1201 	}
1202 
1203 	/* Set the gateway value. */
1204 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1205 	bcopy(gate, rt->rt_gateway, glen);
1206 
1207 	if (oldspace != NULL) {
1208 		/*
1209 		 * If we allocated a new chunk, preserve the original dst.
1210 		 * This way, rt_setgate() really just sets the gate
1211 		 * and leaves the dst field alone.
1212 		 */
1213 		bcopy(dst, space, dlen);
1214 		Free(oldspace);
1215 	}
1216 
1217 	/*
1218 	 * If there is already a gwroute, it's now almost definitely wrong
1219 	 * so drop it.
1220 	 */
1221 	if (rt->rt_gwroute != NULL) {
1222 		RTFREE(rt->rt_gwroute);
1223 		rt->rt_gwroute = NULL;
1224 	}
1225 	if (rt->rt_flags & RTF_GATEWAY) {
1226 		/*
1227 		 * Cloning loop avoidance: In the presence of
1228 		 * protocol-cloning and bad configuration, it is
1229 		 * possible to get stuck in bottomless mutual recursion
1230 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1231 		 * by not allowing protocol-cloning to operate for
1232 		 * gateways (which is probably the correct choice
1233 		 * anyway), and avoid the resulting reference loops
1234 		 * by disallowing any route to run through itself as
1235 		 * a gateway.  This is obviously mandatory when we
1236 		 * get rt->rt_output().
1237 		 *
1238 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1239 		 */
1240 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1241 					   RTF_PRCLONING);
1242 		if (rt->rt_gwroute == rt) {
1243 			rt->rt_gwroute = NULL;
1244 			--rt->rt_refcnt;
1245 			return EDQUOT; /* failure */
1246 		}
1247 	}
1248 
1249 	/*
1250 	 * This isn't going to do anything useful for host routes, so
1251 	 * don't bother.  Also make sure we have a reasonable mask
1252 	 * (we don't yet have one during adds).
1253 	 */
1254 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1255 		struct rtfc_arg arg = { rt, rnh };
1256 
1257 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1258 				       (char *)rt_mask(rt),
1259 				       rt_fixchange, &arg);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static void
1266 rt_maskedcopy(
1267 	struct sockaddr *src,
1268 	struct sockaddr *dst,
1269 	struct sockaddr *netmask)
1270 {
1271 	u_char *cp1 = (u_char *)src;
1272 	u_char *cp2 = (u_char *)dst;
1273 	u_char *cp3 = (u_char *)netmask;
1274 	u_char *cplim = cp2 + *cp3;
1275 	u_char *cplim2 = cp2 + *cp1;
1276 
1277 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1278 	cp3 += 2;
1279 	if (cplim > cplim2)
1280 		cplim = cplim2;
1281 	while (cp2 < cplim)
1282 		*cp2++ = *cp1++ & *cp3++;
1283 	if (cp2 < cplim2)
1284 		bzero(cp2, cplim2 - cp2);
1285 }
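
/*
 * Worked example (added for illustration, not in the original file):
 * for src = 192.0.2.123 and netmask = 255.255.255.0, rt_maskedcopy()
 * copies sa_len and sa_family verbatim, ANDs the remaining bytes with
 * the mask for as long as the mask's sa_len runs, and zero-fills the
 * rest of dst, so the key stored in the radix tree becomes the network
 * address 192.0.2.0.
 */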
1286 
1287 int
1288 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1289 {
1290 	struct rtentry *up_rt, *rt;
1291 
1292 	ASSERT_NETISR_NCPUS(mycpuid);
1293 
1294 	if (!(rt0->rt_flags & RTF_UP)) {
1295 		up_rt = rtlookup(dst);
1296 		if (up_rt == NULL)
1297 			return (EHOSTUNREACH);
1298 		up_rt->rt_refcnt--;
1299 	} else
1300 		up_rt = rt0;
1301 	if (up_rt->rt_flags & RTF_GATEWAY) {
1302 		if (up_rt->rt_gwroute == NULL) {
1303 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1304 			if (up_rt->rt_gwroute == NULL)
1305 				return (EHOSTUNREACH);
1306 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1307 			rtfree(up_rt->rt_gwroute);
1308 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1309 			if (up_rt->rt_gwroute == NULL)
1310 				return (EHOSTUNREACH);
1311 		}
1312 		rt = up_rt->rt_gwroute;
1313 	} else
1314 		rt = up_rt;
1315 	if (rt->rt_flags & RTF_REJECT &&
1316 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1317 	     time_uptime < rt->rt_rmx.rmx_expire))	/* rt not expired */
1318 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1319 	*drt = rt;
1320 	return 0;
1321 }
1322 
1323 static int
1324 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim){
1325 	int i;
1326 
1327 	for (i=0; i<3; i++) {
1328 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1329 		int shimlen;
1330 
1331 		if (shim == NULL)
1332 			break;
1333 
1334 		shimlen = RT_ROUNDUP(shim->sa_len);
1335 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1336 		bcopy(shim, rt->rt_shim[i], shimlen);
1337 	}
1338 
1339 	return 0;
1340 }
1341 
1342 #ifdef ROUTE_DEBUG
1343 
1344 /*
1345  * Print out a route table entry
1346  */
1347 void
1348 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1349 {
1350 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1351 		rtinfo, mycpuid, rn, rn->rt_flags);
1352 	sockaddr_print(rt_key(rn));
1353 	kprintf(" mask ");
1354 	sockaddr_print(rt_mask(rn));
1355 	kprintf(" gw ");
1356 	sockaddr_print(rn->rt_gateway);
1357 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1358 	kprintf(" ifa %p\n", rn->rt_ifa);
1359 }
1360 
1361 void
1362 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1363 {
1364 	int didit = 0;
1365 	int i;
1366 
1367 #ifdef ROUTE_DEBUG
1368 	if (cmd == RTM_DELETE && route_debug > 1)
1369 		print_backtrace(-1);
1370 #endif
1371 
1372 	switch(cmd) {
1373 	case RTM_ADD:
1374 		kprintf("ADD ");
1375 		break;
1376 	case RTM_RESOLVE:
1377 		kprintf("RES ");
1378 		break;
1379 	case RTM_DELETE:
1380 		kprintf("DEL ");
1381 		break;
1382 	default:
1383 		kprintf("C%02d ", cmd);
1384 		break;
1385 	}
1386 	kprintf("rti %p cpu %d ", rti, mycpuid);
1387 	for (i = 0; i < rti->rti_addrs; ++i) {
1388 		if (rti->rti_info[i] == NULL)
1389 			continue;
1390 		if (didit)
1391 			kprintf(" ,");
1392 		switch(i) {
1393 		case RTAX_DST:
1394 			kprintf("(DST ");
1395 			break;
1396 		case RTAX_GATEWAY:
1397 			kprintf("(GWY ");
1398 			break;
1399 		case RTAX_NETMASK:
1400 			kprintf("(MSK ");
1401 			break;
1402 		case RTAX_GENMASK:
1403 			kprintf("(GEN ");
1404 			break;
1405 		case RTAX_IFP:
1406 			kprintf("(IFP ");
1407 			break;
1408 		case RTAX_IFA:
1409 			kprintf("(IFA ");
1410 			break;
1411 		case RTAX_AUTHOR:
1412 			kprintf("(AUT ");
1413 			break;
1414 		case RTAX_BRD:
1415 			kprintf("(BRD ");
1416 			break;
1417 		default:
1418 			kprintf("(?%02d ", i);
1419 			break;
1420 		}
1421 		sockaddr_print(rti->rti_info[i]);
1422 		kprintf(")");
1423 		didit = 1;
1424 	}
1425 	kprintf("\n");
1426 }
1427 
1428 void
1429 sockaddr_print(struct sockaddr *sa)
1430 {
1431 	struct sockaddr_in *sa4;
1432 	struct sockaddr_in6 *sa6;
1433 	int len;
1434 	int i;
1435 
1436 	if (sa == NULL) {
1437 		kprintf("NULL");
1438 		return;
1439 	}
1440 
1441 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1442 
1443 	switch(sa->sa_family) {
1444 	case AF_INET:
1445 	case AF_INET6:
1446 	default:
1447 		switch(sa->sa_family) {
1448 		case AF_INET:
1449 			sa4 = (struct sockaddr_in *)sa;
1450 			kprintf("INET %d %d.%d.%d.%d",
1451 				ntohs(sa4->sin_port),
1452 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1453 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1454 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1455 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1456 			);
1457 			break;
1458 		case AF_INET6:
1459 			sa6 = (struct sockaddr_in6 *)sa;
1460 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1461 				ntohs(sa6->sin6_port),
1462 				sa6->sin6_addr.s6_addr16[0],
1463 				sa6->sin6_addr.s6_addr16[1],
1464 				sa6->sin6_addr.s6_addr16[2],
1465 				sa6->sin6_addr.s6_addr16[3],
1466 				sa6->sin6_addr.s6_addr16[4],
1467 				sa6->sin6_addr.s6_addr16[5],
1468 				sa6->sin6_addr.s6_addr16[6],
1469 				sa6->sin6_addr.s6_addr16[7]
1470 			);
1471 			break;
1472 		default:
1473 			kprintf("AF%d ", sa->sa_family);
1474 			while (len > 0 && sa->sa_data[len-1] == 0)
1475 				--len;
1476 
1477 			for (i = 0; i < len; ++i) {
1478 				if (i)
1479 					kprintf(".");
1480 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1481 			}
1482 			break;
1483 		}
1484 	}
1485 }
1486 
1487 #endif
1488 
1489 /*
1490  * Set up a routing table entry, normally for an interface.
1491  */
1492 int
1493 rtinit(struct ifaddr *ifa, int cmd, int flags)
1494 {
1495 	struct sockaddr *dst, *deldst, *netmask;
1496 	struct mbuf *m = NULL;
1497 	struct radix_node_head *rnh;
1498 	struct radix_node *rn;
1499 	struct rt_addrinfo rtinfo;
1500 	int error;
1501 
1502 	ASSERT_NETISR0;
1503 
1504 	if (flags & RTF_HOST) {
1505 		dst = ifa->ifa_dstaddr;
1506 		netmask = NULL;
1507 	} else {
1508 		dst = ifa->ifa_addr;
1509 		netmask = ifa->ifa_netmask;
1510 	}
1511 	/*
1512 	 * If it's a delete, check that if it exists, it's on the correct
1513 	 * interface or we might scrub a route to another ifa which would
1514 	 * be confusing at best and possibly worse.
1515 	 */
1516 	if (cmd == RTM_DELETE) {
1517 		/*
1518 		 * It's a delete, so it should already exist.
1519 		 * If it's a net, mask off the host bits
1520 		 * (Assuming we have a mask)
1521 		 */
1522 		if (netmask != NULL) {
1523 			m = m_get(M_NOWAIT, MT_SONAME);
1524 			if (m == NULL)
1525 				return (ENOBUFS);
1526 			mbuftrackid(m, 34);
1527 			deldst = mtod(m, struct sockaddr *);
1528 			rt_maskedcopy(dst, deldst, netmask);
1529 			dst = deldst;
1530 		}
1531 		/*
1532 		 * Look up an rtentry that is in the routing tree and
1533 		 * contains the correct info.
1534 		 */
1535 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1536 		    (rn = rnh->rnh_lookup((char *)dst,
1537 					  (char *)netmask, rnh)) == NULL ||
1538 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1539 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1540 			if (m != NULL)
1541 				m_free(m);
1542 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1543 		}
1544 		/* XXX */
1545 #if 0
1546 		else {
1547 			/*
1548 			 * One would think that as we are deleting, and we know
1549 			 * it doesn't exist, we could just return at this point
1550 			 * with an "ELSE" clause, but apparently not..
1551 			 */
1552 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1553 		}
1554 #endif
1555 	}
1556 	/*
1557 	 * Do the actual request
1558 	 */
1559 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1560 	rtinfo.rti_info[RTAX_DST] = dst;
1561 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1562 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1563 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1564 	rtinfo.rti_ifa = ifa;
1565 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa,
1566 	    RTREQ_PRIO_HIGH);
1567 	if (m != NULL)
1568 		m_free(m);
1569 	return (error);
1570 }
1571 
1572 static void
1573 rtinit_rtrequest_callback(int cmd, int error,
1574 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1575 			  void *arg)
1576 {
1577 	struct ifaddr *ifa = arg;
1578 
1579 	if (error == 0 && rt) {
1580 		if (mycpuid == 0)
1581 			rt_newaddrmsg(cmd, ifa, error, rt);
1582 		if (cmd == RTM_DELETE) {
1583 			if (rt->rt_refcnt == 0) {
1584 				++rt->rt_refcnt;
1585 				rtfree(rt);
1586 			}
1587 		}
1588 	}
1589 }
1590 
1591 struct netmsg_rts {
1592 	struct netmsg_base	base;
1593 	int			req;
1594 	struct rt_addrinfo	*rtinfo;
1595 	rtsearch_callback_func_t callback;
1596 	void			*arg;
1597 	boolean_t		exact_match;
1598 	int			found_cnt;
1599 };
1600 
1601 int
1602 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1603     rtsearch_callback_func_t callback, void *arg, boolean_t exact_match,
1604     boolean_t req_prio)
1605 {
1606 	struct netmsg_rts msg;
1607 	int flags = 0;
1608 
1609 	if (req_prio)
1610 		flags = MSGF_PRIORITY;
1611 	netmsg_init(&msg.base, NULL, &curthread->td_msgport, flags,
1612 	    rtsearch_msghandler);
1613 	msg.req = req;
1614 	msg.rtinfo = rtinfo;
1615 	msg.callback = callback;
1616 	msg.arg = arg;
1617 	msg.exact_match = exact_match;
1618 	msg.found_cnt = 0;
1619 	return (netisr_domsg_global(&msg.base));
1620 }
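
/*
 * Callback sketch (illustrative, not part of the original file): an
 * rtsearch_callback_func_t is invoked with the matching route on each
 * cpu and may return EJUSTRETURN to stop the search from being forwarded
 * to the remaining cpus.  A hypothetical read-only example:
 *
 *	static int
 *	example_search_callback(int req, struct rt_addrinfo *rtinfo,
 *	    struct rtentry *rt, void *arg, int found_cnt)
 *	{
 *		u_long *total_pksent = arg;
 *
 *		*total_pksent += rt->rt_rmx.rmx_pksent;
 *		return 0;	(0 lets the search continue)
 *	}
 */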
1621 
1622 static void
1623 rtsearch_msghandler(netmsg_t msg)
1624 {
1625 	struct netmsg_rts *rmsg = (void *)msg;
1626 	struct rt_addrinfo rtinfo;
1627 	struct radix_node_head *rnh;
1628 	struct rtentry *rt;
1629 	int error;
1630 
1631 	ASSERT_NETISR_NCPUS(mycpuid);
1632 
1633 	/*
1634 	 * Copy the rtinfo.  We need to make sure that the original
1635 	 * rtinfo, which is setup by the caller, in the netmsg will
1636 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1637 	 * path will see a different rtinfo than what this CPU has seen.
1638 	 */
1639 	rtinfo = *rmsg->rtinfo;
1640 
1641 	/*
1642 	 * Find the correct routing tree to use for this Address Family
1643 	 */
1644 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1645 		if (mycpuid != 0)
1646 			panic("partially initialized routing tables");
1647 		netisr_replymsg(&rmsg->base, EAFNOSUPPORT);
1648 		return;
1649 	}
1650 
1651 	/*
1652 	 * Correct rtinfo for the host route searching.
1653 	 */
1654 	if (rtinfo.rti_flags & RTF_HOST) {
1655 		rtinfo.rti_netmask = NULL;
1656 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1657 	}
1658 
1659 	rt = (struct rtentry *)
1660 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1661 			     (char *)rtinfo.rti_netmask, rnh);
1662 
1663 	/*
1664 	 * If we are asked to do the "exact match", we need to make sure
1665 	 * that host route searching got a host route while a network
1666 	 * route searching got a network route.
1667 	 */
1668 	if (rt != NULL && rmsg->exact_match &&
1669 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1670 		rt = NULL;
1671 
1672 	if (rt == NULL) {
1673 		/*
1674 		 * No matching routes have been found; don't count this
1675 		 * as a critical error (here, we set 'error' to 0), just
1676 		 * keep moving on, since at least prcloned routes are not
1677 		 * duplicated onto each CPU.
1678 		 */
1679 		error = 0;
1680 	} else {
1681 		rmsg->found_cnt++;
1682 
1683 		rt->rt_refcnt++;
1684 		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1685 				      rmsg->found_cnt);
1686 		rt->rt_refcnt--;
1687 
1688 		if (error == EJUSTRETURN) {
1689 			netisr_replymsg(&rmsg->base, 0);
1690 			return;
1691 		}
1692 	}
1693 
1694 	if (error) {
1695 		KKASSERT(rmsg->found_cnt > 0);
1696 
1697 		/*
1698 		 * In the following cases, an unrecoverable error has
1699 		 * not occurred:
1700 		 * o  Request is RTM_GET
1701 		 * o  The first time that we find the route, but the
1702 		 *    modification fails.
1703 		 */
1704 		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1705 			panic("rtsearch_msghandler: unrecoverable error "
1706 			      "cpu %d", mycpuid);
1707 		}
1708 		netisr_replymsg(&rmsg->base, error);
1709 	} else {
1710 		if (rmsg->found_cnt == 0) {
1711 			/* The requested route has not been seen ... */
1712 			error = ESRCH;
1713 		}
1714 		netisr_forwardmsg_error(&rmsg->base, mycpuid + 1, error);
1715 	}
1716 }
1717 
1718 int
1719 rtmask_add_global(struct sockaddr *mask, boolean_t req_prio)
1720 {
1721 	struct netmsg_base msg;
1722 	int flags = 0;
1723 
1724 	if (req_prio)
1725 		flags = MSGF_PRIORITY;
1726 	netmsg_init(&msg, NULL, &curthread->td_msgport, flags,
1727 	    rtmask_add_msghandler);
1728 	msg.lmsg.u.ms_resultp = mask;
1729 
1730 	return (netisr_domsg_global(&msg));
1731 }
1732 
1733 struct sockaddr *
1734 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1735 {
1736 	struct radix_node *n;
1737 
1738 #define	clen(s)	(*(u_char *)(s))
1739 	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1740 	if (n != NULL &&
1741 	    mask->sa_len >= clen(n->rn_key) &&
1742 	    bcmp((char *)mask + 1,
1743 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1744 		return (struct sockaddr *)n->rn_key;
1745 	} else {
1746 		return NULL;
1747 	}
1748 #undef clen
1749 }
1750 
1751 static void
1752 rtmask_add_msghandler(netmsg_t msg)
1753 {
1754 	struct sockaddr *mask = msg->lmsg.u.ms_resultp;
1755 
1756 	ASSERT_NETISR_NCPUS(mycpuid);
1757 
1758 	if (rtmask_lookup(mask) == NULL) {
1759 		netisr_replymsg(&msg->base, ENOBUFS);
1760 		return;
1761 	}
1762 	netisr_forwardmsg(&msg->base, mycpuid + 1);
1763 }
1764 
1765 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1766 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1767 
1768 struct rtchange_arg {
1769 	struct ifaddr	*old_ifa;
1770 	struct ifaddr	*new_ifa;
1771 	struct rtentry	*rt;
1772 	int		changed;
1773 };
1774 
1775 static void
1776 rtchange_ifa(struct rtentry *rt, struct rtchange_arg *ap)
1777 {
1778 	if (rt->rt_ifa->ifa_rtrequest != NULL)
1779 		rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt);
1780 	IFAFREE(rt->rt_ifa);
1781 
1782 	IFAREF(ap->new_ifa);
1783 	rt->rt_ifa = ap->new_ifa;
1784 	rt->rt_ifp = ap->new_ifa->ifa_ifp;
1785 	if (rt->rt_ifa->ifa_rtrequest != NULL)
1786 		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
1787 
1788 	ap->changed = 1;
1789 }
1790 
1791 static int
1792 rtchange_callback(struct radix_node *rn, void *xap)
1793 {
1794 	struct rtchange_arg *ap = xap;
1795 	struct rtentry *rt = (struct rtentry *)rn;
1796 
1797 	if (rt->rt_ifa == ap->old_ifa) {
1798 		if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
1799 			/*
1800 			 * We could be sawing off the branch we are
1801 			 * still sitting on if the ifa_rtrequest
1802 			 * DEL/ADD were called directly from here.
1803 			 */
1804 			ap->rt = rt;
1805 			return EJUSTRETURN;
1806 		}
1807 		rtchange_ifa(rt, ap);
1808 	}
1809 	return 0;
1810 }
1811 
1812 struct netmsg_rtchange {
1813 	struct netmsg_base	base;
1814 	struct ifaddr		*old_ifa;
1815 	struct ifaddr		*new_ifa;
1816 	int			changed;
1817 };
1818 
1819 static void
1820 rtchange_dispatch(netmsg_t msg)
1821 {
1822 	struct netmsg_rtchange *rmsg = (void *)msg;
1823 	struct radix_node_head *rnh;
1824 	struct rtchange_arg arg;
1825 	int cpu;
1826 
1827 	cpu = mycpuid;
1828 	ASSERT_NETISR_NCPUS(cpu);
1829 
1830 	memset(&arg, 0, sizeof(arg));
1831 	arg.old_ifa = rmsg->old_ifa;
1832 	arg.new_ifa = rmsg->new_ifa;
1833 
1834 	rnh = rt_tables[cpu][AF_INET];
1835 	for (;;) {
1836 		int error;
1837 
1838 		KKASSERT(arg.rt == NULL);
1839 		error = rnh->rnh_walktree(rnh, rtchange_callback, &arg);
1840 		if (arg.rt != NULL) {
1841 			struct rtentry *rt;
1842 
1843 			rt = arg.rt;
1844 			arg.rt = NULL;
1845 			rtchange_ifa(rt, &arg);
1846 		} else {
1847 			break;
1848 		}
1849 	}
1850 	if (arg.changed)
1851 		rmsg->changed = 1;
1852 
1853 	netisr_forwardmsg(&rmsg->base, cpu + 1);
1854 }
1855 
1856 int
1857 rtchange(struct ifaddr *old_ifa, struct ifaddr *new_ifa)
1858 {
1859 	struct netmsg_rtchange msg;
1860 
1861 	/*
1862 	 * XXX individual requests are not independently chained,
1863 	 * which means that the per-cpu route tables will not be
1864 	 * consistent in the middle of the operation.  If routes
1865 	 * related to the interface are manipulated while we are
1866 	 * doing this, the inconsistency could trigger a panic.
1867 	 */
1868 	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
1869 	    rtchange_dispatch);
1870 	msg.old_ifa = old_ifa;
1871 	msg.new_ifa = new_ifa;
1872 	msg.changed = 0;
1873 	netisr_domsg_global(&msg.base);
1874 
1875 	if (msg.changed) {
1876 		old_ifa->ifa_flags &= ~IFA_ROUTE;
1877 		new_ifa->ifa_flags |= IFA_ROUTE;
1878 		return 0;
1879 	} else {
1880 		return ENOENT;
1881 	}
1882 }
1883