/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING  RTF_PROTO3      /* set on routes we manage */

static struct callout in_rtqtimo_ch[MAXCPU];
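
/*
 * Summary of the mechanism implemented below (added for orientation;
 * the individual functions carry the details):
 *
 *  1) in_addroute() flags unicast non-host routes RTF_PRCLONING, so a
 *     lookup for a specific destination produces a cloned host route.
 *  2) in_closeroute() marks such a clone RTPRF_EXPIRING and sets
 *     rmx_expire when its last reference is dropped.
 *  3) in_matchroute() clears RTPRF_EXPIRING again if the clone picks
 *     up a new reference before it is reaped.
 *  4) in_rtqtimo()/in_rtqkill() periodically delete expired clones
 *     and, when too many accumulate, shorten rtq_reallyold.
 */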

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
            struct radix_node *treenodes)
{
        struct rtentry *rt = (struct rtentry *)treenodes;
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
        struct in_ifaddr_container *iac;
        struct in_ifaddr *ia;

        /*
         * For IP, mark routes to multicast addresses as such, because
         * it's easy to do and might be useful (but this is much more
         * dubious since it's so easy to inspect the address).
         *
         * For IP, all unicast non-host routes are automatically cloning.
         */
        if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;

        if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
                rt->rt_flags |= RTF_PRCLONING;

        /*
         * For host routes, we make sure that RTF_BROADCAST
         * is set for anything that looks like a broadcast address.
         * This way, we can avoid an expensive call to in_broadcast()
         * in ip_output() most of the time (because the route passed
         * to ip_output() is almost always a host route).
         *
         * For local routes we set RTF_LOCAL allowing various shortcuts.
         *
         * A cloned network route will point to one of several possible
         * addresses if an interface has aliases and must be repointed
         * back to the correct address or arp_rtrequest() will not properly
         * detect the local ip.
         */
        if (rt->rt_flags & RTF_HOST) {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                           sin->sin_addr.s_addr) {
                        rt->rt_flags |= RTF_LOCAL;
                } else {
                        LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
                                     ia_hash) {
                                ia = iac->ia;
                                if (sin->sin_addr.s_addr ==
                                    ia->ia_addr.sin_addr.s_addr) {
                                        rt->rt_flags |= RTF_LOCAL;
                                        IFAREF(&ia->ia_ifa);
                                        IFAFREE(rt->rt_ifa);
                                        rt->rt_ifa = &ia->ia_ifa;
                                        rt->rt_ifp = rt->rt_ifa->ifa_ifp;
                                        break;
                                }
                        }
                }
        }

        /*
         * Supply a default MTU from the interface if none was specified
         * and the metric is not locked.
         */
        if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
            rt->rt_ifp != NULL)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

        ret = rn_addroute(key, mask, head, treenodes);
        if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
                struct rtentry *oldrt;

                /*
                 * We are trying to add a host route, but can't.
                 * Find out if it is because of an ARP entry and
                 * delete it if so.
                 */
                oldrt = rtpurelookup((struct sockaddr *)sin);
                if (oldrt != NULL) {
                        --oldrt->rt_refcnt;
                        if ((oldrt->rt_flags & RTF_LLINFO) &&
                            (oldrt->rt_flags & RTF_HOST) &&
                            oldrt->rt_gateway &&
                            oldrt->rt_gateway->sa_family == AF_LINK) {
                                rtrequest(RTM_DELETE, rt_key(oldrt),
                                          oldrt->rt_gateway, rt_mask(oldrt),
                                          oldrt->rt_flags, NULL);
                                ret = rn_addroute(key, mask, head, treenodes);
                        }
                }
        }

        /*
         * If the new route has been created successfully, and it is
         * not a multicast/broadcast or cloned route, then we will
         * have to flush the ipflow.  Otherwise, we may end up using
         * the wrong route.
         */
        if (ret != NULL &&
            (rt->rt_flags &
             (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0) {
                ipflow_flush_oncpu();
        }
        return ret;
}

/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
        struct radix_node *rn = rn_match(key, head);
        struct rtentry *rt = (struct rtentry *)rn;

        if (rt != NULL && rt->rt_refcnt == 0) { /* this is first reference */
                if (rt->rt_flags & RTPRF_EXPIRING) {
                        rt->rt_flags &= ~RTPRF_EXPIRING;
                        rt->rt_rmx.rmx_expire = 0;
                }
        }
        return rn;
}

static int rtq_reallyold = 60*60;       /* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;  /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;           /* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");
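
/*
 * Tuning note (added; the command below is an illustration only): the
 * three knobs above are exposed as net.inet.ip.rtexpire,
 * net.inet.ip.rtminexpire and net.inet.ip.rtmaxcache.  For example,
 *
 *      sysctl net.inet.ip.rtexpire=1800
 *
 * halves the time an unreferenced cloned route is kept around, while
 * setting it to 0 makes in_closeroute() delete such routes immediately
 * rather than waiting for a timeout cycle.
 */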

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
        struct rtentry *rt = (struct rtentry *)rn;

        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */

        if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;

        if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
                return;

        /*
         * As requested by David Greenman:
         * If rtq_reallyold is 0, just delete the route without
         * waiting for a timeout cycle to kill it.
         */
        if (rtq_reallyold != 0) {
                rt->rt_flags |= RTPRF_EXPIRING;
                rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
        } else {
                /*
                 * Remove route from the radix tree, but defer deallocation
                 * until we return to rtfree().
                 */
                rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
                          rt->rt_flags, &rt);
        }
}

struct rtqk_arg {
        struct radix_node_head *rnh;
        int draining;
        int killed;
        int found;
        int updating;
        time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_flags & RTPRF_EXPIRING) {
                ap->found++;
                if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");

                        err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                        rt_mask(rt), rt->rt_flags, NULL);
                        if (err)
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        else
                                ap->killed++;
                } else {
                        if (ap->updating &&
                            (rt->rt_rmx.rmx_expire - time_second >
                             rtq_reallyold)) {
                                rt->rt_rmx.rmx_expire = time_second +
                                                        rtq_reallyold;
                        }
                        ap->nextstop = lmin(ap->nextstop,
                                            rt->rt_rmx.rmx_expire);
                }
        }

        return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

static void
in_rtqtimo(void *rock)
{
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
        static time_t last_adjusted_timeout = 0;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_second + rtq_timeout;
        arg.draining = arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        crit_exit();

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
        if ((arg.found - arg.killed > rtq_toomany) &&
            (time_second - last_adjusted_timeout >= rtq_timeout) &&
            rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if (rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }

                last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                crit_enter();
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
                crit_exit();
        }

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop - time_second;
        callout_reset(&in_rtqtimo_ch[mycpuid], tvtohz_high(&atv), in_rtqtimo,
                      rock);
}
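
/*
 * Worked example of the back-off above (numbers assume the defaults):
 * with rtq_reallyold = 3600 and more than rtq_toomany (128) unreferenced
 * clones still outstanding after a kill pass, successive adjustments
 * yield 3600 -> 2400 -> 1600 -> 1066 -> ..., at most one step per
 * rtq_timeout (600 second) interval, and never below rtq_minreallyold
 * (10 seconds).
 */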

void
in_rtqdrain(void)
{
        struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
        struct rtqk_arg arg;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        crit_exit();
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
        struct radix_node_head *rnh;

        if (!rn_inithead(head, rn_cpumaskhead(mycpuid), off))
                return 0;

        if (head != (void **)&rt_tables[mycpuid][AF_INET])      /* BOGUS! */
                return 1;       /* only do this for the real routing table */

        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matchroute;
        rnh->rnh_close = in_closeroute;
        callout_init(&in_rtqtimo_ch[mycpuid]);
        in_rtqtimo(rnh);        /* kick off timeout first time */
        return 1;
}

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
        struct radix_node_head *rnh;
        struct ifaddr *ifa;
        int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
        struct in_ifadown_arg *ap = xap;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
                 * We need to disable the automatic prune that happens
                 * in this case in rtrequest() because it will blow
                 * away the pointers that rn_walktree() needs in order
                 * to continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * so that behavior is not needed there.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
                err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                rt_mask(rt), rt->rt_flags, NULL);
                if (err)
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
        }
        return 0;
}

struct netmsg_ifadown {
        struct netmsg_base base;
        struct ifaddr *ifa;
        int del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
        struct netmsg_ifadown *rmsg = (void *)msg;
        struct radix_node_head *rnh;
        struct ifaddr *ifa = rmsg->ifa;
        struct in_ifadown_arg arg;
        int nextcpu, cpu;

        cpu = mycpuid;

        arg.rnh = rnh = rt_tables[cpu][AF_INET];
        arg.ifa = ifa;
        arg.del = rmsg->del;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
        ifa->ifa_flags &= ~IFA_ROUTE;

        nextcpu = cpu + 1;
        if (nextcpu < ncpus)
                lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
        else
                lwkt_replymsg(&rmsg->base.lmsg, 0);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
        struct netmsg_ifadown msg;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return 1;

        /*
         * XXX individual requests are not independently chained,
         * which means that the per-cpu route tables will not be
         * consistent in the middle of the operation.  If routes
         * related to the interface are manipulated while we are
         * doing this the inconsistency could trigger a panic.
         */
        netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
                    in_ifadown_dispatch);
        msg.ifa = ifa;
        msg.del = delete;
        rt_domsg_global(&msg.base);

        return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
        if (ifa->ifa_ifp->if_type == IFT_CARP)
                return 0;
#endif
        return in_ifadown_force(ifa, delete);
}
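
/*
 * Usage sketch (assumption; the callers live outside this file):
 * deleting an interface address would be expected to invoke
 *
 *      in_ifadown(&ia->ia_ifa, 1);
 *
 * so that static routes pointing at the address are removed as well,
 * whereas simply bringing the interface down would pass del == 0 and
 * leave RTF_STATIC routes in place, as in_ifadownkill() above shows.
 */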