/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING	RTF_PROTO3	/* set on routes we manage */

struct in_rtq_pcpu {
	struct radix_node_head	*rnh;		/* this cpu's IPv4 radix tree */

	struct callout		timo_ch;	/* periodic expiration timer */
	struct netmsg_base	timo_nmsg;	/* bounces the timer into the netisr */

	time_t			lastdrain;	/* uptime of the last drain */
	int			draining;	/* a drain is in flight */
	struct netmsg_base	drain_nmsg;	/* bounces a drain into the netisr */
} __cachealign;

static void	in_rtqtimo(void *);

static struct in_rtq_pcpu	in_rtq_pcpu[MAXCPU];
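/*
 * NOTE: Each netisr cpu owns a replica of the IPv4 routing table
 * (rt_tables[cpu][AF_INET]), so the expiration and drain machinery
 * below is strictly per-cpu: callouts and IPIs merely hand a
 * preallocated netmsg to the owning netisr thread, and the radix-tree
 * walks run in that thread's context (see the ASSERT_NETISR_NCPUS
 * calls below).
 */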
/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
	    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;
	struct in_ifaddr_container *iac;
	struct in_ifaddr *ia;

	/*
	 * For IP, mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 *
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
		rt->rt_flags |= RTF_PRCLONING;

	/*
	 * For host routes, we make sure that RTF_BROADCAST
	 * is set for anything that looks like a broadcast address.
	 * This way, we can avoid an expensive call to in_broadcast()
	 * in ip_output() most of the time (because the route passed
	 * to ip_output() is almost always a host route).
	 *
	 * For local routes we set RTF_LOCAL, allowing various shortcuts.
	 *
	 * A cloned network route will point to one of several possible
	 * addresses if an interface has aliases and must be repointed
	 * back to the correct address, or arp_rtrequest() will not
	 * properly detect the local ip.
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			   sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		} else {
			LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
				     ia_hash) {
				ia = iac->ia;
				if (sin->sin_addr.s_addr ==
				    ia->ia_addr.sin_addr.s_addr) {
					rt->rt_flags |= RTF_LOCAL;
					IFAREF(&ia->ia_ifa);
					IFAFREE(rt->rt_ifa);
					rt->rt_ifa = &ia->ia_ifa;
					rt->rt_ifp = rt->rt_ifa->ifa_ifp;
					break;
				}
			}
		}
	}

	if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp != NULL)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(key, mask, head, treenodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *oldrt;

		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an ARP entry and
		 * delete it if so.
		 */
		oldrt = rtpurelookup((struct sockaddr *)sin);
		if (oldrt != NULL) {
			--oldrt->rt_refcnt;
			if ((oldrt->rt_flags & RTF_LLINFO) &&
			    (oldrt->rt_flags & RTF_HOST) &&
			    oldrt->rt_gateway &&
			    oldrt->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE, rt_key(oldrt),
					  oldrt->rt_gateway, rt_mask(oldrt),
					  oldrt->rt_flags, NULL);
				ret = rn_addroute(key, mask, head, treenodes);
			}
		}
	}

	/*
	 * If the new route has been created successfully, and it is
	 * not a multicast/broadcast or cloned route, then we will
	 * have to flush the ipflow.  Otherwise, we may end up using
	 * the wrong route.
	 */
	if (ret != NULL &&
	    (rt->rt_flags &
	     (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0) {
		ipflow_flush_oncpu();
	}
	return ret;
}
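/*
 * Overview of the expiration machinery implemented by in_matchroute(),
 * in_closeroute() and in_rtqkill() below.  Roughly, the lifecycle of a
 * managed (cloned host) route is:
 *
 *	refcnt 0 -> 1:	RTPRF_EXPIRING is cleared and rmx_expire is
 *			reset to 0 (the route is in active use).
 *	refcnt 1 -> 0:	RTPRF_EXPIRING is set and rmx_expire becomes
 *			time_uptime + rtq_reallyold.
 *	timer walk:	any RTPRF_EXPIRING route whose rmx_expire has
 *			passed is deleted.
 */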
/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(key, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt != NULL && rt->rt_refcnt == 0) {	/* this is first reference */
		if (rt->rt_flags & RTPRF_EXPIRING) {
			rt->rt_flags &= ~RTPRF_EXPIRING;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;	/* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;		/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0,
    "Upper limit on cloned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_EXPIRING;
		rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
	} else {
		/*
		 * Remove the route from the radix tree, but defer
		 * deallocation until we return to rtfree().
		 */
		rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
			  rt->rt_flags, &rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;		/* delete all managed routes now */
	int killed;		/* number of routes deleted */
	int found;		/* number of managed routes seen */
	int updating;		/* clamp timeouts to rtq_reallyold */
	time_t nextstop;	/* earliest upcoming expiration */
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * when the timeout is not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_EXPIRING) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
					rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			else
				ap->killed++;
		} else {
			if (ap->updating &&
			    (int)(rt->rt_rmx.rmx_expire - time_uptime) >
			    rtq_reallyold) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only and
 * could be changed by all CPUs.  However, they are changed at such a
 * low frequency that we can ignore the cache-thrashing issue and treat
 * them as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];
	struct radix_node_head *rnh = pcpu->rnh;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
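	/*
	 * Worked example (hypothetical numbers): with rtq_toomany = 128
	 * and rtq_reallyold = 3600, a walk that leaves 200 unexpired
	 * managed routes behind lowers rtq_reallyold to 2*3600/3 = 2400,
	 * then re-walks the tree clamping every rmx_expire to at most
	 * time_uptime + 2400; no further adjustment happens for at least
	 * rtq_timeout (600) seconds.
	 */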
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_uptime;
	if ((int)atv.tv_sec < 1) {		/* time shift safety */
		atv.tv_sec = 1;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	if ((int)atv.tv_sec > rtq_timeout) {	/* time shift safety */
		atv.tv_sec = rtq_timeout;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	callout_reset(&pcpu->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

/*
 * Callout handler; bounce the work into the owning netisr thread.
 * MSGF_DONE guards against re-sending a netmsg that is still in flight.
 */
static void
in_rtqtimo(void *arg __unused)
{
	int cpuid = mycpuid;
	struct lwkt_msg *lmsg = &in_rtq_pcpu[cpuid].timo_nmsg.lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
	crit_exit();
}

/*
 * Force-expire all managed routes in this cpu's routing table.
 */
static void
in_rtqdrain_oncpu(struct in_rtq_pcpu *pcpu)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
	struct rtqk_arg arg;

	ASSERT_NETISR_NCPUS(mycpuid);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	pcpu->lastdrain = time_uptime;
}

static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	in_rtqdrain_oncpu(pcpu);
	pcpu->draining = 0;
}

/*
 * IPI handler; queue the drain netmsg to this cpu's netisr, unless one
 * is already in flight.
 */
static void
in_rtqdrain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &in_rtq_pcpu[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

/*
 * Drain (force-expire) managed routes on all netisr cpus: drain the
 * local table directly when possible, then IPI the remaining cpus,
 * skipping any cpu that is already draining or was just drained.
 */
void
in_rtqdrain(void)
{
	cpumask_t mask;
	int cpu;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		in_rtqdrain_oncpu(&in_rtq_pcpu[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[cpu];

		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (pcpu->draining || pcpu->lastdrain == time_uptime) {
			/* Just drained or is draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		pcpu->draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}
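/*
 * NOTE: in_rtqdrain() is presumably entered from the protocol drain
 * path on memory shortage (its caller lives outside this file), so it
 * must be safe to call from any cpu; that is why only the local table
 * is drained synchronously and every other cpu is reached through the
 * IPI + netmsg bounce above.
 */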
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;
	struct in_rtq_pcpu *pcpu;
	int cpuid = mycpuid;

	KKASSERT(head == (void **)&rt_tables[cpuid][AF_INET]);

	if (!rn_inithead(head, rn_cpumaskhead(cpuid), off))
		return 0;

	rnh = *head;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matchroute;
	rnh->rnh_close = in_closeroute;

	pcpu = &in_rtq_pcpu[cpuid];
	pcpu->rnh = rnh;
	callout_init_mp(&pcpu->timo_ch);
	netmsg_init(&pcpu->timo_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqtimo_dispatch);
	netmsg_init(&pcpu->drain_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqdrain_dispatch);

	in_rtqtimo(NULL);	/* kick off the timeout for the first time */
	return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have deleted in any
		 * case, so that behavior is not needed there.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
	}
	return 0;
}

struct netmsg_ifadown {
	struct netmsg_base	base;
	struct ifaddr		*ifa;
	int			del;
};

/*
 * Per-cpu dispatch: clean up this cpu's routing table, then forward the
 * message to the next netisr cpu.
 */
static void
in_ifadown_dispatch(netmsg_t msg)
{
	struct netmsg_ifadown *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct ifaddr *ifa = rmsg->ifa;
	struct in_ifadown_arg arg;
	int cpu;

	cpu = mycpuid;
	ASSERT_NETISR_NCPUS(cpu);

	arg.rnh = rnh = rt_tables[cpu][AF_INET];
	arg.ifa = ifa;
	arg.del = rmsg->del;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;

	netisr_forwardmsg(&msg->base, cpu + 1);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
	struct netmsg_ifadown msg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    in_ifadown_dispatch);
	msg.ifa = ifa;
	msg.del = delete;
	netisr_domsg_global(&msg.base);

	return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
	if (ifa->ifa_ifp->if_type == IFT_CARP)
		return 0;
#endif
	return in_ifadown_force(ifa, delete);
}