/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every reference to such a route gets turned into a reference to
 *     a host route to the specific destination requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING	RTF_PROTO3	/* set on routes we manage */

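/*
 * Per-cpu state: each netisr cpu owns its own IPv4 radix tree, a
 * callout/netmsg pair that drives route expiration on that cpu, and
 * the bookkeeping used to throttle drain requests.
 */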
struct in_rtq_pcpu {
	struct radix_node_head	*rnh;

	struct callout		timo_ch;
	struct netmsg_base	timo_nmsg;

	time_t			lastdrain;
	int			draining;
	struct netmsg_base	drain_nmsg;
} __cachealign;

static void	in_rtqtimo(void *);

static struct in_rtq_pcpu in_rtq_pcpu[MAXCPU];

/*
 * Do what we need to do when inserting a route: set the IPv4-specific
 * flags, fix up the ifa/MTU, and hand the route to rn_addroute().
 */
static struct radix_node *
in_addroute(const void *key, const void *mask, struct radix_node_head *head,
	    struct radix_node *nodes)
{
	struct rtentry *rt = (struct rtentry *)nodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;
	struct in_ifaddr_container *iac;
	struct in_ifaddr *ia;

	/*
	 * For IP, mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 *
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
		rt->rt_flags |= RTF_PRCLONING;

	/*
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * For local routes we set RTF_LOCAL, allowing various shortcuts.
	 *
	 * A cloned network route will point to one of several possible
	 * addresses if an interface has aliases and must be repointed
	 * back to the correct address or arp_rtrequest() will not properly
	 * detect the local IP.
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			   sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		} else {
			LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
				     ia_hash) {
				ia = iac->ia;
				if (sin->sin_addr.s_addr ==
				    ia->ia_addr.sin_addr.s_addr) {
					rt->rt_flags |= RTF_LOCAL;
					IFAREF(&ia->ia_ifa);
					IFAFREE(rt->rt_ifa);
					rt->rt_ifa = &ia->ia_ifa;
					rt->rt_ifp = rt->rt_ifa->ifa_ifp;
					break;
				}
			}
		}
	}

	if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp != NULL)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(key, mask, head, nodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *oldrt;

		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an ARP entry and
		 * delete it if so.
		 */
		oldrt = rtpurelookup((struct sockaddr *)sin);
		if (oldrt != NULL) {
			--oldrt->rt_refcnt;
			if ((oldrt->rt_flags & RTF_LLINFO) &&
			    (oldrt->rt_flags & RTF_HOST) &&
			    oldrt->rt_gateway &&
			    oldrt->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE, rt_key(oldrt),
					  oldrt->rt_gateway, rt_mask(oldrt),
					  oldrt->rt_flags, NULL);
				ret = rn_addroute(key, mask, head, nodes);
			}
		}
	}

	/*
	 * If the new route has been created successfully, and it is
	 * not a multicast/broadcast or cloned route, then we will
	 * have to flush the ipflow.  Otherwise, we may end up using
	 * the wrong route.
	 */
	if (ret != NULL &&
	    (rt->rt_flags &
	     (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0)
		ipflow_flush_oncpu();
	return ret;
}

/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and turn its expiration timer
 * off again.
 */
static struct radix_node *
in_matchroute(const void *key, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(key, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt != NULL && rt->rt_refcnt == 0) { /* this is first reference */
		if (rt->rt_flags & RTPRF_EXPIRING) {
			rt->rt_flags &= ~RTPRF_EXPIRING;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;  /* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;  /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;	   /* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");
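
/*
 * All three knobs can be tuned at runtime, e.g. (illustrative values):
 *	sysctl net.inet.ip.rtexpire=1800
 *	sysctl net.inet.ip.rtmaxcache=256
 */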

/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_EXPIRING;
		rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
	} else {
		/*
		 * Remove the route from the radix tree, but defer
		 * deallocation until we return to rtfree().
		 */
		rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
			  rt->rt_flags, &rt);
	}
}

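/*
 * Argument block for the in_rtqkill() tree walk: 'draining' and
 * 'updating' select the behavior described below, 'found' and 'killed'
 * count the RTPRF_EXPIRING routes seen and deleted, and 'nextstop'
 * tracks the earliest remaining expiration time.
 */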
struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * when the timeout has not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_EXPIRING) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
					rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			else
				ap->killed++;
		} else {
			if (ap->updating &&
			    (int)(rt->rt_rmx.rmx_expire - time_uptime) >
			     rtq_reallyold) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only, and
 * could be changed by all CPUs.  However, they are changed at such a low
 * frequency that we can ignore the cache thrashing issue and treat them
 * as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];
	struct radix_node_head *rnh = pcpu->rnh;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
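	/*
	 * With the defaults, rtq_reallyold decays geometrically on each
	 * adjustment: 3600 -> 2400 -> 1600 -> 1066 -> ..., bounded below
	 * by rtq_minreallyold (10 seconds).
	 */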
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_uptime;
	if ((int)atv.tv_sec < 1) {		/* time shift safety */
		atv.tv_sec = 1;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	if ((int)atv.tv_sec > rtq_timeout) {	/* time shift safety */
		atv.tv_sec = rtq_timeout;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	callout_reset(&pcpu->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

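/*
 * Callout handler: kick the expiration scan over to this cpu's netisr
 * via timo_nmsg, unless the previous message is still in flight
 * (MSGF_DONE not yet set).
 */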
static void
in_rtqtimo(void *arg __unused)
{
	int cpuid = mycpuid;
	struct lwkt_msg *lmsg = &in_rtq_pcpu[cpuid].timo_nmsg.lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
	crit_exit();
}

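/*
 * Immediately delete every RTPRF_EXPIRING route in this cpu's tree,
 * regardless of how much expiration time it has left.
 */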
static void
in_rtqdrain_oncpu(struct in_rtq_pcpu *pcpu)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
	struct rtqk_arg arg;

	ASSERT_NETISR_NCPUS(mycpuid);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	pcpu->lastdrain = time_uptime;
}

static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	in_rtqdrain_oncpu(pcpu);
	pcpu->draining = 0;
}

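/*
 * IPI handler: schedule the drain message onto this cpu's netisr,
 * unless one is already in flight.
 */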
static void
in_rtqdrain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &in_rtq_pcpu[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

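/*
 * Drain the cloned-route caches on all netisr cpus: the local cpu is
 * drained synchronously, the others are kicked via IPI.  Cpus that are
 * already draining, or were drained during the current second, are
 * skipped.
 */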
void
in_rtqdrain(void)
{
	cpumask_t mask;
	int cpu;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		in_rtqdrain_oncpu(&in_rtq_pcpu[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[cpu];

		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (pcpu->draining || pcpu->lastdrain == time_uptime) {
			/* Just drained or is draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		pcpu->draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}

/*
 * Initialize our routing tree.  This is called once for each netisr
 * cpu; every cpu gets its own radix tree and expiration timer.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;
	struct in_rtq_pcpu *pcpu;
	int cpuid = mycpuid;

	rnh = *head;
	KKASSERT(rnh == rt_tables[cpuid][AF_INET]);

	if (!rn_inithead(&rnh, rn_cpumaskhead(cpuid), off))
		return 0;

	*head = rnh;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matchroute;
	rnh->rnh_close = in_closeroute;

	pcpu = &in_rtq_pcpu[cpuid];
	pcpu->rnh = rnh;
	callout_init_mp(&pcpu->timo_ch);
	netmsg_init(&pcpu->timo_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqtimo_dispatch);
	netmsg_init(&pcpu->drain_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqdrain_dispatch);

	in_rtqtimo(NULL);	/* kick off timeout first time */
	return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
	}
	return 0;
}

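/*
 * Message used to run in_ifadownkill() on every netisr cpu in turn;
 * the dispatch handler forwards it to the next cpu when done.
 */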
struct netmsg_ifadown {
	struct netmsg_base	base;
	struct ifaddr		*ifa;
	int			del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
	struct netmsg_ifadown *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct ifaddr *ifa = rmsg->ifa;
	struct in_ifadown_arg arg;
	int cpu;

	cpu = mycpuid;
	ASSERT_NETISR_NCPUS(cpu);

	arg.rnh = rnh = rt_tables[cpu][AF_INET];
	arg.ifa = ifa;
	arg.del = rmsg->del;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;

	netisr_forwardmsg(&msg->base, cpu + 1);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
	struct netmsg_ifadown msg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this, the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    in_ifadown_dispatch);
	msg.ifa = ifa;
	msg.del = delete;
	netisr_domsg_global(&msg.base);

	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
	if (ifa->ifa_ifp->if_type == IFT_CARP)
		return 0;
#endif
	return in_ifadown_force(ifa, delete);
}