/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING	RTF_PROTO3	/* set on routes we manage */

struct in_rtq_pcpu {
	struct radix_node_head	*rnh;

	struct callout		timo_ch;
	struct netmsg_base	timo_nmsg;

	time_t			lastdrain;
	int			draining;
	struct netmsg_base	drain_nmsg;
} __cachealign;

static void	in_rtqtimo(void *);

static struct in_rtq_pcpu	in_rtq_pcpu[MAXCPU];
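/*
 * Illustrative sketch (hypothetical traffic, not part of the original
 * code): a TCP connection to 192.0.2.7 that resolves through a cloning
 * network route first clones a host route for 192.0.2.7; the TCP
 * metrics recorded against that host route outlive the connection, and
 * the unreferenced clone is eventually reaped by the in_rtqtimo()
 * machinery below once its expiration time passes.
 */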
/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
	    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;
	struct in_ifaddr_container *iac;
	struct in_ifaddr *ia;

	/*
	 * For IP, mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 *
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
		rt->rt_flags |= RTF_PRCLONING;

	/*
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * For local routes we set RTF_LOCAL allowing various shortcuts.
	 *
	 * A cloned network route will point to one of several possible
	 * addresses if an interface has aliases and must be repointed
	 * back to the correct address or arp_rtrequest() will not properly
	 * detect the local ip.
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			   sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		} else {
			LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
				     ia_hash) {
				ia = iac->ia;
				if (sin->sin_addr.s_addr ==
				    ia->ia_addr.sin_addr.s_addr) {
					rt->rt_flags |= RTF_LOCAL;
					IFAREF(&ia->ia_ifa);
					IFAFREE(rt->rt_ifa);
					rt->rt_ifa = &ia->ia_ifa;
					rt->rt_ifp = rt->rt_ifa->ifa_ifp;
					break;
				}
			}
		}
	}

	if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp != NULL)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(key, mask, head, treenodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *oldrt;

		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an ARP entry and
		 * delete it if so.
		 */
		oldrt = rtpurelookup((struct sockaddr *)sin);
		if (oldrt != NULL) {
			--oldrt->rt_refcnt;
			if ((oldrt->rt_flags & RTF_LLINFO) &&
			    (oldrt->rt_flags & RTF_HOST) &&
			    oldrt->rt_gateway &&
			    oldrt->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE, rt_key(oldrt),
					  oldrt->rt_gateway, rt_mask(oldrt),
					  oldrt->rt_flags, NULL);
				ret = rn_addroute(key, mask, head, treenodes);
			}
		}
	}

	/*
	 * If the new route has been created successfully, and it is
	 * not a multicast/broadcast or cloned route, then we will
	 * have to flush the ipflow.  Otherwise, we may end up using
	 * the wrong route.
	 */
	if (ret != NULL &&
	    (rt->rt_flags &
	     (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0)
		ipflow_flush_oncpu();
	return ret;
}
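/*
 * Example of the alias repointing above (hypothetical addresses): if an
 * interface carries 192.0.2.1/24 plus an alias 192.0.2.2, a host route
 * cloned for 192.0.2.2 may initially inherit the ifa of 192.0.2.1 from
 * the parent network route; the INADDR_HASH scan swaps rt_ifa over to
 * the alias's ifa so that arp_rtrequest() properly detects the local IP.
 */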
/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(key, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt != NULL && rt->rt_refcnt == 0) {	/* this is first reference */
		if (rt->rt_flags & RTPRF_EXPIRING) {
			rt->rt_flags &= ~RTPRF_EXPIRING;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;  /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;	/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");
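/*
 * The three knobs above surface as sysctls under net.inet.ip; a hedged
 * usage sketch (the values are examples only):
 *
 *	sysctl net.inet.ip.rtexpire=300		# expire clones after 5 min
 *	sysctl net.inet.ip.rtminexpire=10	# floor for auto-adjustment
 *	sysctl net.inet.ip.rtmaxcache=256	# "too many" threshold
 *
 * rtexpire is also cranked down automatically by in_rtqtimo_dispatch()
 * below when more than rtmaxcache expiring routes accumulate.
 */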
/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) !=
	    RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_EXPIRING;
		rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
	} else {
		/*
		 * Remove route from the radix tree, but defer deallocation
		 * until we return to rtfree().
		 */
		rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
			  rt->rt_flags, &rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_EXPIRING) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
					rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			else
				ap->killed++;
		} else {
			if (ap->updating &&
			    (int)(rt->rt_rmx.rmx_expire - time_uptime) >
			    rtq_reallyold) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only, and
 * could be changed by all CPUs.  However, they are changed at such a low
 * frequency that we can ignore the cache-thrashing issue and treat them
 * as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];
	struct radix_node_head *rnh = pcpu->rnh;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
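	/*
	 * Worked example with the defaults (added note): rtq_reallyold
	 * starts at 3600s and each overloaded pass scales it by 2/3
	 * (3600 -> 2400 -> 1600 -> 1066 -> ...), clamped below at
	 * rtq_minreallyold (10s), with at most one adjustment per
	 * rtq_timeout (600s) window.
	 */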
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_uptime;
	if ((int)atv.tv_sec < 1) {		/* time shift safety */
		atv.tv_sec = 1;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	if ((int)atv.tv_sec > rtq_timeout) {	/* time shift safety */
		atv.tv_sec = rtq_timeout;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	callout_reset(&pcpu->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

static void
in_rtqtimo(void *arg __unused)
{
	int cpuid = mycpuid;
	struct lwkt_msg *lmsg = &in_rtq_pcpu[cpuid].timo_nmsg.lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
	crit_exit();
}

static void
in_rtqdrain_oncpu(struct in_rtq_pcpu *pcpu)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
	struct rtqk_arg arg;

	ASSERT_NETISR_NCPUS(mycpuid);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	pcpu->lastdrain = time_uptime;
}

static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	in_rtqdrain_oncpu(pcpu);
	pcpu->draining = 0;
}

static void
in_rtqdrain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &in_rtq_pcpu[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

void
in_rtqdrain(void)
{
	cpumask_t mask;
	int cpu;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		in_rtqdrain_oncpu(&in_rtq_pcpu[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[cpu];

		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (pcpu->draining || pcpu->lastdrain == time_uptime) {
			/* Just drained or is draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		pcpu->draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}
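/*
 * Note on the fan-out above: when the calling cpu is a netisr cpu it
 * drains its own table synchronously; every other netisr cpu is flagged
 * 'draining' and poked with an IPI, whose handler queues that cpu's
 * drain_nmsg to its netisr thread.  Each radix tree is thus only ever
 * walked from the cpu that owns it.
 */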
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;
	struct in_rtq_pcpu *pcpu;
	int cpuid = mycpuid;

	KKASSERT(head == (void **)&rt_tables[cpuid][AF_INET]);

	if (!rn_inithead(head, rn_cpumaskhead(cpuid), off))
		return 0;

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matchroute;
	rnh->rnh_close = in_closeroute;

	pcpu = &in_rtq_pcpu[cpuid];
	pcpu->rnh = rnh;
	callout_init_mp(&pcpu->timo_ch);
	netmsg_init(&pcpu->timo_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqtimo_dispatch);
	netmsg_init(&pcpu->drain_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqdrain_dispatch);

	in_rtqtimo(NULL);	/* kick off timeout first time */
	return 1;
}

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
	}
	return 0;
}

struct netmsg_ifadown {
	struct netmsg_base base;
	struct ifaddr *ifa;
	int del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
	struct netmsg_ifadown *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct ifaddr *ifa = rmsg->ifa;
	struct in_ifadown_arg arg;
	int cpu;

	cpu = mycpuid;
	ASSERT_NETISR_NCPUS(cpu);

	arg.rnh = rnh = rt_tables[cpu][AF_INET];
	arg.ifa = ifa;
	arg.del = rmsg->del;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;

	netisr_forwardmsg(&msg->base, cpu + 1);
}
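/*
 * in_ifadown_dispatch() runs in each netisr thread in turn:
 * netisr_forwardmsg() hands the same message on to cpu + 1, so the
 * per-cpu route tables are cleaned sequentially, one cpu at a time,
 * rather than in parallel.
 */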
int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
	struct netmsg_ifadown msg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    in_ifadown_dispatch);
	msg.ifa = ifa;
	msg.del = delete;
	netisr_domsg_global(&msg.base);

	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
	if (ifa->ifa_ifp->if_type == IFT_CARP)
		return 0;
#endif
	return in_ifadown_force(ifa, delete);
}