/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every reference to such a route gets turned into a reference to
 *     a host route to the specific destination requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so
 *     that a large quantity of stale routing data is not kept in
 *     kernel memory indefinitely.  See in_rtqtimo() below for the
 *     exact mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING	RTF_PROTO3	/* set on routes we manage */

struct in_rtqtimo_ctx {
	struct callout		timo_ch;
	struct netmsg_base	timo_nmsg;
	struct radix_node_head	*timo_rnh;
} __cachealign;

static void	in_rtqtimo(void *);

static struct in_rtqtimo_ctx in_rtqtimo_context[MAXCPU];
static struct netmsg_base in_rtqdrain_netmsg[MAXCPU];
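/*
 * Overview sketch (editorial, not compiled): the lifecycle that the
 * hooks below implement for a cloned host route.
 *
 *	rtalloc()	-> in_matchroute() clears RTPRF_EXPIRING on the
 *			   first reference, cancelling any pending expiry
 *	last rtfree()	-> in_closeroute() sets RTPRF_EXPIRING and stamps
 *			   rt_rmx.rmx_expire = time_uptime + rtq_reallyold
 *	callout		-> in_rtqtimo() schedules in_rtqtimo_dispatch(),
 *			   whose in_rtqkill() walk deletes expired routes
 */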
/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
	    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;
	struct in_ifaddr_container *iac;
	struct in_ifaddr *ia;

	/*
	 * For IP, mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 *
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
		rt->rt_flags |= RTF_PRCLONING;

	/*
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * For local routes we set RTF_LOCAL, allowing various shortcuts.
	 *
	 * A cloned network route will point to one of several possible
	 * addresses if an interface has aliases and must be repointed
	 * back to the correct address or arp_rtrequest() will not
	 * properly detect the local IP.
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			   sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		} else {
			LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
				     ia_hash) {
				ia = iac->ia;
				if (sin->sin_addr.s_addr ==
				    ia->ia_addr.sin_addr.s_addr) {
					rt->rt_flags |= RTF_LOCAL;
					IFAREF(&ia->ia_ifa);
					IFAFREE(rt->rt_ifa);
					rt->rt_ifa = &ia->ia_ifa;
					rt->rt_ifp = rt->rt_ifa->ifa_ifp;
					break;
				}
			}
		}
	}

	if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp != NULL)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(key, mask, head, treenodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *oldrt;

		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an ARP entry and
		 * delete it if so.
		 */
		oldrt = rtpurelookup((struct sockaddr *)sin);
		if (oldrt != NULL) {
			--oldrt->rt_refcnt;
			if ((oldrt->rt_flags & RTF_LLINFO) &&
			    (oldrt->rt_flags & RTF_HOST) &&
			    oldrt->rt_gateway &&
			    oldrt->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE, rt_key(oldrt),
					  oldrt->rt_gateway, rt_mask(oldrt),
					  oldrt->rt_flags, NULL);
				ret = rn_addroute(key, mask, head, treenodes);
			}
		}
	}

	/*
	 * If the new route has been created successfully, and it is
	 * not a multicast/broadcast or cloned route, then we will
	 * have to flush the ipflow.  Otherwise, we may end up using
	 * the wrong route.
	 */
	if (ret != NULL &&
	    (rt->rt_flags &
	     (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0) {
		ipflow_flush_oncpu();
	}
	return ret;
}
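/*
 * Consumer-side sketch of why in_addroute() pre-marks RTF_BROADCAST:
 * ip_output() can then test a single flag on the (almost always host)
 * route instead of calling in_broadcast().  Hedged illustration, not
 * the actual ip_output() code:
 *
 *	if (rt->rt_flags & RTF_BROADCAST)
 *		m->m_flags |= M_BCAST;	(no in_broadcast() scan needed)
 */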
/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(key, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt != NULL && rt->rt_refcnt == 0) {	/* this is first reference */
		if (rt->rt_flags & RTPRF_EXPIRING) {
			rt->rt_flags &= ~RTPRF_EXPIRING;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;	/* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;		/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");

/*
 * On last reference drop, mark the route as belonging to us so that
 * it can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_EXPIRING;
		rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
	} else {
		/*
		 * Remove route from the radix tree, but defer deallocation
		 * until we return to rtfree().
		 */
		rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
			  rt->rt_flags, &rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};
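/*
 * The three knobs above surface as sysctls.  Example settings
 * (hypothetical values, shown only to illustrate the names):
 *
 *	sysctl net.inet.ip.rtexpire=300		expire idle clones in 5 min
 *	sysctl net.inet.ip.rtminexpire=10	floor for auto-adjustment
 *	sysctl net.inet.ip.rtmaxcache=256	crank down past 256 clones
 */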
/*
 * Get rid of old routes.  When draining, this deletes everything,
 * even if the timeout has not expired yet.  When updating, this makes
 * sure that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_EXPIRING) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
					rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			else
				ap->killed++;
		} else {
			if (ap->updating &&
			    (int)(rt->rt_rmx.rmx_expire - time_uptime) >
			    rtq_reallyold) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only, and
 * could be changed by all CPUs.  However, they are changed at such a
 * low frequency that we can ignore the cache-thrashing issue and treat
 * them as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct in_rtqtimo_ctx *ctx = &in_rtqtimo_context[mycpuid];
	struct radix_node_head *rnh = ctx->timo_rnh;

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
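	/*
	 * Worked example with the defaults (illustrative arithmetic):
	 * each crowded pass decays rtq_reallyold by a factor of 2/3,
	 * 3600 -> 2400 -> 1600 -> 1066 -> ..., at most one step per
	 * rtq_timeout (600s), clamped below at rtq_minreallyold (10s).
	 */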
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_uptime;
	if ((int)atv.tv_sec < 1) {		/* time shift safety */
		atv.tv_sec = 1;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	if ((int)atv.tv_sec > rtq_timeout) {	/* time shift safety */
		atv.tv_sec = rtq_timeout;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	callout_reset(&ctx->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

static void
in_rtqtimo(void *arg __unused)
{
	int cpuid = mycpuid;
	struct lwkt_msg *lmsg = &in_rtqtimo_context[cpuid].timo_nmsg.lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
	crit_exit();
}

static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
	struct rtqk_arg arg;

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
}

static void
in_rtqdrain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &in_rtqdrain_netmsg[cpu].lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

void
in_rtqdrain(void)
{
	cpumask_t mask;

	CPUMASK_ASSBMASK(mask, ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;
	struct in_rtqtimo_ctx *ctx;
	int cpuid = mycpuid;

	KKASSERT(head == (void **)&rt_tables[cpuid][AF_INET]);

	if (!rn_inithead(head, rn_cpumaskhead(cpuid), off))
		return 0;

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matchroute;
	rnh->rnh_close = in_closeroute;

	ctx = &in_rtqtimo_context[cpuid];
	ctx->timo_rnh = rnh;
	callout_init_mp(&ctx->timo_ch);
	netmsg_init(&ctx->timo_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqtimo_dispatch);
	netmsg_init(&in_rtqdrain_netmsg[cpuid], NULL, &netisr_adone_rport,
	    MSGF_PRIORITY, in_rtqdrain_dispatch);

	in_rtqtimo(NULL);	/* kick off timeout first time */
	return 1;
}
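/*
 * Registration sketch (hedged; the real call site lives in the routing
 * domain setup, not in this file): each cpu installs its own AF_INET
 * tree, with `off' being the bit offset of sin_addr within struct
 * sockaddr_in:
 *
 *	void **head = (void **)&rt_tables[mycpuid][AF_INET];
 *	in_inithead(head, offsetof(struct sockaddr_in, sin_addr) << 3);
 */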
/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
	}
	return 0;
}

struct netmsg_ifadown {
	struct netmsg_base base;
	struct ifaddr *ifa;
	int del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
	struct netmsg_ifadown *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct ifaddr *ifa = rmsg->ifa;
	struct in_ifadown_arg arg;
	int nextcpu, cpu;

	cpu = mycpuid;

	arg.rnh = rnh = rt_tables[cpu][AF_INET];
	arg.ifa = ifa;
	arg.del = rmsg->del;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;

	nextcpu = cpu + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
	else
		lwkt_replymsg(&rmsg->base.lmsg, 0);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
	struct netmsg_ifadown msg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    in_ifadown_dispatch);
	msg.ifa = ifa;
	msg.del = delete;
	rt_domsg_global(&msg.base);

	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
	if (ifa->ifa_ifp->if_type == IFT_CARP)
		return 0;
#endif
	return in_ifadown_force(ifa, delete);
}
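/*
 * Caller sketch (hypothetical, for illustration only): interface
 * teardown code typically walks the interface's address list and
 * calls in_ifadown() for each AF_INET address, passing delete=1 when
 * the address itself is being destroyed so static routes go away too:
 *
 *	struct ifaddr_container *ifac;
 *
 *	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link)
 *		in_ifadown(ifac->ifa, 1);
 */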