1 /* $OpenBSD: route.c,v 1.399 2021/05/25 22:45:09 bluhm Exp $ */ 2 /* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */ 3 4 /* 5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the project nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1980, 1986, 1991, 1993 35 * The Regents of the University of California. All rights reserved. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. Neither the name of the University nor the names of its contributors 46 * may be used to endorse or promote products derived from this software 47 * without specific prior written permission. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 59 * SUCH DAMAGE. 60 * 61 * @(#)route.c 8.2 (Berkeley) 11/15/93 62 */ 63 64 /* 65 * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995 66 * 67 * NRL grants permission for redistribution and use in source and binary 68 * forms, with or without modification, of the software and documentation 69 * created at NRL provided that the following conditions are met: 70 * 71 * 1. Redistributions of source code must retain the above copyright 72 * notice, this list of conditions and the following disclaimer. 
73 * 2. Redistributions in binary form must reproduce the above copyright 74 * notice, this list of conditions and the following disclaimer in the 75 * documentation and/or other materials provided with the distribution. 76 * 3. All advertising materials mentioning features or use of this software 77 * must display the following acknowledgements: 78 * This product includes software developed by the University of 79 * California, Berkeley and its contributors. 80 * This product includes software developed at the Information 81 * Technology Division, US Naval Research Laboratory. 82 * 4. Neither the name of the NRL nor the names of its contributors 83 * may be used to endorse or promote products derived from this software 84 * without specific prior written permission. 85 * 86 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS 87 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 88 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 89 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR 90 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 91 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 92 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 93 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 94 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 95 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 96 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 97 * 98 * The views and conclusions contained in the software and documentation 99 * are those of the authors and should not be interpreted as representing 100 * official policies, either expressed or implied, of the US Naval 101 * Research Laboratory (NRL). 
102 */ 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/mbuf.h> 107 #include <sys/socket.h> 108 #include <sys/socketvar.h> 109 #include <sys/timeout.h> 110 #include <sys/domain.h> 111 #include <sys/protosw.h> 112 #include <sys/ioctl.h> 113 #include <sys/kernel.h> 114 #include <sys/queue.h> 115 #include <sys/pool.h> 116 #include <sys/atomic.h> 117 118 #include <net/if.h> 119 #include <net/if_var.h> 120 #include <net/if_dl.h> 121 #include <net/route.h> 122 123 #include <netinet/in.h> 124 #include <netinet/ip_var.h> 125 #include <netinet/in_var.h> 126 127 #ifdef INET6 128 #include <netinet/ip6.h> 129 #include <netinet6/ip6_var.h> 130 #include <netinet6/in6_var.h> 131 #endif 132 133 #ifdef MPLS 134 #include <netmpls/mpls.h> 135 #endif 136 137 #ifdef BFD 138 #include <net/bfd.h> 139 #endif 140 141 #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long)) 142 143 /* Give some jitter to hash, to avoid synchronization between routers. */ 144 static uint32_t rt_hashjitter; 145 146 extern unsigned int rtmap_limit; 147 148 struct cpumem * rtcounters; 149 int rttrash; /* routes not in table but not freed */ 150 int ifatrash; /* ifas not in ifp list but not free */ 151 152 struct pool rtentry_pool; /* pool for rtentry structures */ 153 struct pool rttimer_pool; /* pool for rttimer structures */ 154 155 void rt_timer_init(void); 156 int rt_setgwroute(struct rtentry *, u_int); 157 void rt_putgwroute(struct rtentry *); 158 int rtflushclone1(struct rtentry *, void *, u_int); 159 int rtflushclone(struct rtentry *, unsigned int); 160 int rt_ifa_purge_walker(struct rtentry *, void *, unsigned int); 161 struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int); 162 int rt_clone(struct rtentry **, struct sockaddr *, unsigned int); 163 struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *); 164 static int rt_copysa(struct sockaddr *, struct sockaddr *, struct sockaddr **); 165 166 #ifdef DDB 167 void 
db_print_sa(struct sockaddr *);
void	 db_print_ifa(struct ifaddr *);
int	 db_show_rtentry(struct rtentry *, void *, unsigned int);
#endif

/* Upper bound for dynamically assigned route label ids. */
#define	LABELID_MAX	50000

/* Reference-counted mapping between a route label name and its id. */
struct rt_label {
	TAILQ_ENTRY(rt_label)	rtl_entry;	/* rt_labels list glue */
	char			rtl_name[RTLABEL_LEN];	/* label string */
	u_int16_t		rtl_id;		/* id stored in rt_labelid */
	int			rtl_ref;	/* routes referencing this label */
};

TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);

/*
 * Bootstrap the routing layer: allocate the per-CPU route counters
 * and the rtentry pool, seed the non-zero multipath hash jitter and,
 * when compiled in, initialize BFD.
 */
void
route_init(void)
{
	rtcounters = counters_alloc(rts_ncounters);

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_SOFTNET, 0,
	    "rtentry", NULL);

	/* Draw until non-zero: 0 is treated as "not yet initialized". */
	while (rt_hashjitter == 0)
		rt_hashjitter = arc4random();

#ifdef BFD
	bfdinit();
#endif
}

/*
 * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
 */
int
rtisvalid(struct rtentry *rt)
{
	if (rt == NULL)
		return (0);

	if (!ISSET(rt->rt_flags, RTF_UP))
		return (0);

	/* A gateway route is only valid if its cached next hop is up too. */
	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		KASSERT(rt->rt_gwroute != NULL);
		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
			return (0);
	}

	return (1);
}

/*
 * Do the actual lookup for rtalloc(9), do not use directly!
 *
 * Return the best matching entry for the destination ``dst''.
 *
 * "RT_RESOLVE" means that a corresponding L2 entry should
 * be added to the routing table and resolved (via ARP or
 * NDP), if it does not exist.
 */
struct rtentry *
rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
{
	struct rtentry *rt = NULL;

	rt = rtable_match(tableid, dst, src);
	if (rt == NULL) {
		rtstat_inc(rts_unreach);
		return (NULL);
	}

	/* Lazily attach an L2 (cloned) entry when resolution is wanted. */
	if (ISSET(rt->rt_flags, RTF_CLONING) && ISSET(flags, RT_RESOLVE))
		rt_clone(&rt, dst, tableid);

	rt->rt_use++;
	return (rt);
}

/*
 * Clone the RTF_CLONING route ``*rtp'' for destination ``dst'' and,
 * on success, replace ``*rtp'' with the new cloned entry.  On failure
 * an RTM_MISS message is sent instead.
 */
int
rt_clone(struct rtentry **rtp, struct sockaddr *dst, unsigned int rtableid)
{
	struct rt_addrinfo info;
	struct rtentry *rt = *rtp;
	int error = 0;

	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;

	/*
	 * The priority of cloned route should be different
	 * to avoid conflict with /32 cloning routes.
	 *
	 * It should also be higher to let the ARP layer find
	 * cloned routes instead of the cloning one.
	 */
	KERNEL_LOCK();
	error = rtrequest(RTM_RESOLVE, &info, rt->rt_priority - 1, &rt,
	    rtableid);
	KERNEL_UNLOCK();
	if (error) {
		rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0, error, rtableid);
	} else {
		/* Inform listeners of the new route */
		rtm_send(rt, RTM_ADD, 0, rtableid);
		rtfree(*rtp);
		*rtp = rt;
	}
	return (error);
}

/*
 * Originated from bridge_hash() in if_bridge.c
 */
#define mix(a, b, c) do {						\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)

/*
 * Compute a 16-bit flow hash over ``dst'' and the caller-supplied
 * source words ``src'', mixed with the per-boot jitter.  Returns -1
 * when ``src'' is NULL, ``rt'' is not a valid RTF_MPATH route, or
 * multipath is disabled for the address family.
 */
int
rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
{
	uint32_t a, b, c;

	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
		return (-1);

	a = b = 0x9e3779b9;	/* golden-ratio constant, as in bridge_hash() */
	c = rt_hashjitter;

	switch (dst->sa_family) {
	case AF_INET:
	    {
		struct sockaddr_in *sin;

		if (!ipmultipath)
			return (-1);

		sin = satosin(dst);
		a += sin->sin_addr.s_addr;
		b += src[0];
		mix(a, b, c);
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6;

		if (!ip6_multipath)
			return (-1);

		sin6 = satosin6(dst);
		a += sin6->sin6_addr.s6_addr32[0];
		b += sin6->sin6_addr.s6_addr32[2];
		c += src[0];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[1];
		b += sin6->sin6_addr.s6_addr32[3];
		c += src[1];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[2];
		b += sin6->sin6_addr.s6_addr32[1];
		c += src[2];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[3];
		b += sin6->sin6_addr.s6_addr32[0];
		c += src[3];
		mix(a, b, c);
		break;
	    }
#endif /* INET6 */
	}

	return (c & 0xffff);
}

/*
 * Allocate a route, potentially using multipath to select the peer.
 */
struct rtentry *
rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
{
	return (rt_match(dst, src, RT_RESOLVE, rtableid));
}

/*
 * Look in the routing table for the best matching entry for
 * ``dst''.
 *
 * If a route with a gateway is found and its next hop is no
 * longer valid, try to cache it.
 */
struct rtentry *
rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
{
	return (rt_match(dst, NULL, flags, rtableid));
}

/*
 * Cache the route entry corresponding to a reachable next hop in
 * the gateway entry ``rt''.
 */
int
rt_setgwroute(struct rtentry *rt, u_int rtableid)
{
	struct rtentry *prt, *nhrt;
	unsigned int rdomain = rtable_l2(rtableid);
	int error;

	NET_ASSERT_LOCKED();

	KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));

	/* If we cannot find a valid next hop bail. */
	nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rdomain);
	if (nhrt == NULL)
		return (ENOENT);

	/* Next hop entry must be on the same interface. */
	if (nhrt->rt_ifidx != rt->rt_ifidx) {
		struct sockaddr_in6 sa_mask;

		if (!ISSET(nhrt->rt_flags, RTF_LLINFO) ||
		    !ISSET(nhrt->rt_flags, RTF_CLONED)) {
			rtfree(nhrt);
			return (EHOSTUNREACH);
		}

		/*
		 * We found a L2 entry, so we might have multiple
		 * RTF_CLONING routes for the same subnet.  Query
		 * the first route of the multipath chain and iterate
		 * until we find the correct one.
		 */
		prt = rtable_lookup(rdomain, rt_key(nhrt->rt_parent),
		    rt_plen2mask(nhrt->rt_parent, &sa_mask), NULL, RTP_ANY);
		rtfree(nhrt);

		while (prt != NULL && prt->rt_ifidx != rt->rt_ifidx)
			prt = rtable_iterate(prt);

		/* We found nothing or a non-cloning MPATH route. */
		if (prt == NULL || !ISSET(prt->rt_flags, RTF_CLONING)) {
			rtfree(prt);
			return (EHOSTUNREACH);
		}

		error = rt_clone(&prt, rt->rt_gateway, rdomain);
		if (error) {
			rtfree(prt);
			return (error);
		}
		nhrt = prt;
	}

	/*
	 * Next hop must be reachable, this also prevents rtentry
	 * loops for example when rt->rt_gwroute points to rt.
	 */
	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) {
		rtfree(nhrt);
		return (ENETUNREACH);
	}

	/* Next hop is valid so remove possible old cache. */
	rt_putgwroute(rt);
	KASSERT(rt->rt_gwroute == NULL);

	/*
	 * If the MTU of next hop is 0, this will reset the MTU of the
	 * route to run PMTUD again from scratch.
	 */
	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu))
		rt->rt_mtu = nhrt->rt_mtu;

	/*
	 * To avoid reference counting problems when writing link-layer
	 * addresses in an outgoing packet, we ensure that the lifetime
	 * of a cached entry is greater than the bigger lifetime of the
	 * gateway entries it is pointed by.
	 */
	nhrt->rt_flags |= RTF_CACHED;
	nhrt->rt_cachecnt++;

	/* Transfer the rt_match() reference to the gateway cache. */
	rt->rt_gwroute = nhrt;

	return (0);
}

/*
 * Invalidate the cached route entry of the gateway entry ``rt''.
 */
void
rt_putgwroute(struct rtentry *rt)
{
	struct rtentry *nhrt = rt->rt_gwroute;

	NET_ASSERT_LOCKED();

	if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL)
		return;

	KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
	KASSERT(nhrt->rt_cachecnt > 0);

	/* Clear RTF_CACHED once the last gateway entry lets go. */
	--nhrt->rt_cachecnt;
	if (nhrt->rt_cachecnt == 0)
		nhrt->rt_flags &= ~RTF_CACHED;

	rtfree(rt->rt_gwroute);
	rt->rt_gwroute = NULL;
}

/* Take an additional reference on ``rt''. */
void
rtref(struct rtentry *rt)
{
	atomic_inc_int(&rt->rt_refcnt);
}

/*
 * Release a reference on ``rt'' and free it (timers, ifa, label,
 * MPLS state, gateway and key sockaddrs) once the count drops to 0.
 * NULL is accepted and ignored.
 */
void
rtfree(struct rtentry *rt)
{
	int refcnt;

	if (rt == NULL)
		return;

	refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt);
	if (refcnt <= 0) {
		KASSERT(!ISSET(rt->rt_flags, RTF_UP));
		KASSERT(!RT_ROOT(rt));
		atomic_dec_int(&rttrash);
		/* A negative count means an rtfree()/rtref() imbalance. */
		if (refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}

		KERNEL_LOCK();
		rt_timer_remove_all(rt);
		ifafree(rt->rt_ifa);
		rtlabel_unref(rt->rt_labelid);
#ifdef MPLS
		rt_mpls_clear(rt);
#endif
		free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len));
		free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len);
		KERNEL_UNLOCK();

		pool_put(&rtentry_pool, rt);
	}
}

/*
 * Drop a reference on ``ifa'' and free it when no references remain.
 * Panics on NULL: every caller must hold a valid reference.
 */
void
ifafree(struct ifaddr *ifa)
{
	if (ifa == NULL)
		panic("ifafree");
	if (ifa->ifa_refcnt == 0) {
		ifatrash--;
		free(ifa, M_IFADDR, 0);
	} else
		ifa->ifa_refcnt--;
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
546 */ 547 void 548 rtredirect(struct sockaddr *dst, struct sockaddr *gateway, 549 struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain) 550 { 551 struct rtentry *rt; 552 int error = 0; 553 enum rtstat_counters stat = rts_ncounters; 554 struct rt_addrinfo info; 555 struct ifaddr *ifa; 556 unsigned int ifidx = 0; 557 int flags = RTF_GATEWAY|RTF_HOST; 558 uint8_t prio = RTP_NONE; 559 560 NET_ASSERT_LOCKED(); 561 562 /* verify the gateway is directly reachable */ 563 rt = rtalloc(gateway, 0, rdomain); 564 if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) { 565 rtfree(rt); 566 error = ENETUNREACH; 567 goto out; 568 } 569 ifidx = rt->rt_ifidx; 570 ifa = rt->rt_ifa; 571 rtfree(rt); 572 rt = NULL; 573 574 rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY); 575 /* 576 * If the redirect isn't from our current router for this dst, 577 * it's either old or wrong. If it redirects us to ourselves, 578 * we have a routing loop, perhaps as a result of an interface 579 * going down recently. 580 */ 581 #define equal(a1, a2) \ 582 ((a1)->sa_len == (a2)->sa_len && \ 583 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) 584 if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) 585 error = EINVAL; 586 else if (ifa_ifwithaddr(gateway, rdomain) != NULL || 587 (gateway->sa_family = AF_INET && 588 in_broadcast(satosin(gateway)->sin_addr, rdomain))) 589 error = EHOSTUNREACH; 590 if (error) 591 goto done; 592 /* 593 * Create a new entry if we just got back a wildcard entry 594 * or the lookup failed. This is necessary for hosts 595 * which use routing redirects generated by smart gateways 596 * to dynamically build the routing tables. 597 */ 598 if (rt == NULL) 599 goto create; 600 /* 601 * Don't listen to the redirect if it's 602 * for a route to an interface. 603 */ 604 if (ISSET(rt->rt_flags, RTF_GATEWAY)) { 605 if (!ISSET(rt->rt_flags, RTF_HOST)) { 606 /* 607 * Changing from route to net => route to host. 
608 * Create new route, rather than smashing route to net. 609 */ 610 create: 611 rtfree(rt); 612 flags |= RTF_DYNAMIC; 613 bzero(&info, sizeof(info)); 614 info.rti_info[RTAX_DST] = dst; 615 info.rti_info[RTAX_GATEWAY] = gateway; 616 info.rti_ifa = ifa; 617 info.rti_flags = flags; 618 rt = NULL; 619 error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt, 620 rdomain); 621 if (error == 0) { 622 flags = rt->rt_flags; 623 prio = rt->rt_priority; 624 } 625 stat = rts_dynamic; 626 } else { 627 /* 628 * Smash the current notion of the gateway to 629 * this destination. Should check about netmask!!! 630 */ 631 rt->rt_flags |= RTF_MODIFIED; 632 flags |= RTF_MODIFIED; 633 prio = rt->rt_priority; 634 stat = rts_newgateway; 635 rt_setgate(rt, gateway, rdomain); 636 } 637 } else 638 error = EHOSTUNREACH; 639 done: 640 if (rt) { 641 if (rtp && !error) 642 *rtp = rt; 643 else 644 rtfree(rt); 645 } 646 out: 647 if (error) 648 rtstat_inc(rts_badredirect); 649 else if (stat != rts_ncounters) 650 rtstat_inc(stat); 651 bzero((caddr_t)&info, sizeof(info)); 652 info.rti_info[RTAX_DST] = dst; 653 info.rti_info[RTAX_GATEWAY] = gateway; 654 info.rti_info[RTAX_AUTHOR] = src; 655 rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain); 656 } 657 658 /* 659 * Delete a route and generate a message 660 */ 661 int 662 rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid) 663 { 664 int error; 665 struct rt_addrinfo info; 666 struct sockaddr_rtlabel sa_rl; 667 struct sockaddr_in6 sa_mask; 668 669 KASSERT(rt->rt_ifidx == ifp->if_index); 670 671 /* 672 * Request the new route so that the entry is not actually 673 * deleted. That will allow the information being reported to 674 * be accurate (and consistent with route_output()). 
 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	if (!ISSET(rt->rt_flags, RTF_HOST))
		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(rt->rt_labelid, &sa_rl);
	info.rti_flags = rt->rt_flags;
	info.rti_info[RTAX_IFP] = sdltosa(ifp->if_sadl);
	info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
	rtm_miss(RTM_DELETE, &info, info.rti_flags, rt->rt_priority,
	    rt->rt_ifidx, error, tableid);
	/* rtrequest_delete() returned the detached entry; drop it. */
	if (error == 0)
		rtfree(rt);
	return (error);
}

/*
 * Return 1 when both entries share the same destination key and prefix
 * length, 0 otherwise.
 */
static inline int
rtequal(struct rtentry *a, struct rtentry *b)
{
	if (a == b)
		return 1;

	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
	    rt_plen(a) == rt_plen(b))
		return 1;
	else
		return 0;
}

/*
 * rtable_walk() callback: return EEXIST for any cloned child of the
 * cloning route ``arg'' so the walk stops and the caller can delete it.
 */
int
rtflushclone1(struct rtentry *rt, void *arg, u_int id)
{
	struct rtentry *cloningrt = arg;
	struct ifnet *ifp;

	if (!ISSET(rt->rt_flags, RTF_CLONED))
		return 0;

	/* Cached route must stay alive as long as their parent are alive. */
	if (ISSET(rt->rt_flags, RTF_CACHED) && (rt->rt_parent != cloningrt))
		return 0;

	if (!rtequal(rt->rt_parent, cloningrt))
		return 0;
	/*
	 * This happens when an interface with a RTF_CLONING route is
	 * being detached.  In this case it's safe to bail because all
	 * the routes are being purged by rt_ifa_purge().
	 */
	ifp = if_get(rt->rt_ifidx);
	if (ifp == NULL)
		return 0;

	if_put(ifp);
	return EEXIST;
}

/*
 * Delete every cloned child of the cloning route ``parent'' in table
 * ``rtableid'', restarting the table walk after each deletion.
 */
int
rtflushclone(struct rtentry *parent, unsigned int rtableid)
{
	struct rtentry *rt = NULL;
	struct ifnet *ifp;
	int error;

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif

	do {
		error = rtable_walk(rtableid, rt_key(parent)->sa_family, &rt,
		    rtflushclone1, parent);
		if (rt != NULL && error == EEXIST) {
			ifp = if_get(rt->rt_ifidx);
			if (ifp == NULL) {
				error = EAGAIN;
			} else {
				error = rtdeletemsg(rt, ifp, rtableid);
				if (error == 0)
					error = EAGAIN;
				if_put(ifp);
			}
		}
		rtfree(rt);
		rt = NULL;
	} while (error == EAGAIN);

	return error;

}

/*
 * Remove the route described by ``info'' from table ``tableid'' without
 * freeing it; on success the detached entry is handed back via
 * ``ret_nrt'' (or released when ret_nrt is NULL).
 */
int
rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct rtentry *rt;
	int error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio);
	if (rt == NULL)
		return (ESRCH);

	/* Make sure that's the route the caller want to delete. */
	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) {
		rtfree(rt);
		return (ESRCH);
	}

#ifdef BFD
	if (ISSET(rt->rt_flags, RTF_BFD))
		bfdclear(rt);
#endif

	error = rtable_delete(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rt);
	if (error != 0) {
		rtfree(rt);
		return (ESRCH);
	}

	/* Release next hop cache before flushing cloned entries. */
	rt_putgwroute(rt);

	/* Clean up any cloned children.
 */
	if (ISSET(rt->rt_flags, RTF_CLONING))
		rtflushclone(rt, tableid);

	rtfree(rt->rt_parent);
	rt->rt_parent = NULL;

	rt->rt_flags &= ~RTF_UP;

	KASSERT(ifp->if_index == rt->rt_ifidx);
	ifp->if_rtrequest(ifp, RTM_DELETE, rt);

	/* Entry is out of the table but not yet freed. */
	atomic_inc_int(&rttrash);

	if (ret_nrt != NULL)
		*ret_nrt = rt;
	else
		rtfree(rt);

	return (0);
}

/*
 * Add (RTM_ADD) or clone (RTM_RESOLVE) a route described by ``info''
 * in table ``tableid''.  RTM_DELETE is rejected; use
 * rtrequest_delete().  On success the new entry is handed back via
 * ``ret_nrt'' when non-NULL.
 */
int
rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct ifnet *ifp;
	struct rtentry *rt, *crt;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	struct sockaddr_rtlabel *sa_rl, sa_rl2;
	struct sockaddr_dl sa_dl = { sizeof(sa_dl), AF_LINK };
	int error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	/* Host routes carry no netmask. */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		return (EINVAL);

	case RTM_RESOLVE:
		/* ``*ret_nrt'' is the RTF_CLONING parent to clone from. */
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			return (EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			return (EINVAL);
		KASSERT(rt->rt_ifa->ifa_ifp != NULL);
		info->rti_ifa = rt->rt_ifa;
		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST);
		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC);
		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl);
		info->rti_info[RTAX_LABEL] =
		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
		/* FALLTHROUGH */

	case RTM_ADD:
		if (info->rti_ifa == NULL)
			return (EINVAL);
		ifa = info->rti_ifa;
		ifp = ifa->ifa_ifp;
		if (prio == 0)
			prio = ifp->if_priority + RTP_STATIC;

		error = rt_copysa(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], &ndst);
		if (error)
			return (error);

		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO);
		if (rt == NULL) {
			free(ndst, M_RTABLE, ndst->sa_len);
			return (ENOBUFS);
		}

		rt->rt_refcnt = 1;
		rt->rt_flags = info->rti_flags | RTF_UP;
		rt->rt_priority = prio;	/* init routing priority */
		LIST_INIT(&rt->rt_timer);

		/* Check the link state if the table supports it. */
		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
		    !ISSET(ifp->if_flags, IFF_UP))) {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for MPLS infos */
		if (info->rti_flags & RTF_MPLS &&
		    (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
			error = rt_mpls_set(rt, info->rti_info[RTAX_SRC],
			    info->rti_mpls);
			if (error) {
				free(ndst, M_RTABLE, ndst->sa_len);
				pool_put(&rtentry_pool, rt);
				return (error);
			}
		} else
			rt_mpls_clear(rt);
#endif

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifidx = ifp->if_index;
		/*
		 * Copy metrics and a back pointer from the cloned
		 * route's parent.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED)) {
			rtref(*ret_nrt);
			rt->rt_parent = *ret_nrt;
			rt->rt_rmx = (*ret_nrt)->rt_rmx;
		}

		/*
		 * We must set rt->rt_gateway before adding ``rt'' to
		 * the routing table because the radix MPATH code use
		 * it to (re)order routes.
		 */
		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY],
		    tableid))) {
			/* Undo everything set up above, in this order. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (error);
		}

		error = rtable_insert(tableid, ndst,
		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY],
		    rt->rt_priority, rt);
		if (error != 0 &&
		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) {
			/* overwrite cloned route */
			if (ISSET(crt->rt_flags, RTF_CLONED) &&
			    !ISSET(crt->rt_flags, RTF_CACHED)) {
				struct ifnet *cifp;

				cifp = if_get(crt->rt_ifidx);
				KASSERT(cifp != NULL);
				rtdeletemsg(crt, cifp, tableid);
				if_put(cifp);

				/* Retry now that the conflict is gone. */
				error = rtable_insert(tableid, ndst,
				    info->rti_info[RTAX_NETMASK],
				    info->rti_info[RTAX_GATEWAY],
				    rt->rt_priority, rt);
			}
			rtfree(crt);
		}
		if (error != 0) {
			/* Same unwind order as the rt_setgate() failure. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (EEXIST);
		}
		ifp->if_rtrequest(ifp, req, rt);

		if_group_routechange(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK]);

		if (ret_nrt != NULL)
			*ret_nrt = rt;
		else
			rtfree(rt);
		break;
	}

	return (0);
}

/*
 * Install ``gate'' as the gateway address of ``rt'', reallocating the
 * sockaddr buffer when the rounded size changed, and refresh the next
 * hop cache for gateway routes.
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
{
	int glen = ROUNDUP(gate->sa_len);
	struct sockaddr *sa;

	if (rt->rt_gateway == NULL || glen != ROUNDUP(rt->rt_gateway->sa_len)) {
		sa = malloc(glen, M_RTABLE, M_NOWAIT);
		if (sa == NULL)
			return (ENOBUFS);
		if (rt->rt_gateway != NULL) {
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
		}
		rt->rt_gateway = sa;
	}
	memmove(rt->rt_gateway, gate, glen);

	if (ISSET(rt->rt_flags, RTF_GATEWAY))
		return (rt_setgwroute(rt, rtableid));

	return (0);
}

/*
 * Return the route entry containing the next hop link-layer
 * address corresponding to ``rt''.
 */
struct rtentry *
rt_getll(struct rtentry *rt)
{
	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		KASSERT(rt->rt_gwroute != NULL);
		return (rt->rt_gwroute);
	}

	return (rt);
}

/*
 * Copy ``src'' into ``dst'', ANDing each byte with ``netmask'' up to
 * the shorter of the netmask and source lengths; remaining bytes of
 * ``dst'' are zeroed.
 */
void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
    struct sockaddr *netmask)
{
	u_char *cp1 = (u_char *)src;
	u_char *cp2 = (u_char *)dst;
	u_char *cp3 = (u_char *)netmask;
	u_char *cplim = cp2 + *cp3;	/* bounded by netmask sa_len */
	u_char *cplim2 = cp2 + *cp1;	/* bounded by source sa_len */

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}

/*
 * allocate new sockaddr structure based on the user supplied src and mask
 * that is useable for the routing table.
 */
static int
rt_copysa(struct sockaddr *src, struct sockaddr *mask, struct sockaddr **dst)
{
	/* Partial-byte masks indexed by the number of leading mask bits. */
	static const u_char maskarray[] = {
	    0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
	struct sockaddr *ndst;
	const struct domain *dp;
	u_char *csrc, *cdst;
	int i, plen;

	/* Find the routing-capable domain matching the address family. */
	for (i = 0; (dp = domains[i]) != NULL; i++) {
		if (dp->dom_rtoffset == 0)
			continue;
		if (src->sa_family == dp->dom_family)
			break;
	}
	if (dp == NULL)
		return (EAFNOSUPPORT);

	if (src->sa_len < dp->dom_sasize)
		return (EINVAL);

	plen = rtable_satoplen(src->sa_family, mask);
	if (plen == -1)
		return (EINVAL);

	ndst = malloc(dp->dom_sasize, M_RTABLE, M_NOWAIT|M_ZERO);
	if (ndst == NULL)
		return (ENOBUFS);

	ndst->sa_family = src->sa_family;
	ndst->sa_len = dp->dom_sasize;

	csrc = (u_char *)src + dp->dom_rtoffset;
	cdst = (u_char *)ndst + dp->dom_rtoffset;

	/* Copy whole bytes, then mask the trailing partial byte, if any. */
	memcpy(cdst, csrc, plen / 8);
	if (plen % 8 != 0)
		cdst[plen / 8] = csrc[plen / 8] & maskarray[plen % 8];

	*dst = ndst;
	return (0);
}

/*
 * Install the route implied by address ``ifa'' (local, broadcast,
 * connected or link-layer depending on ``flags'') in routing domain
 * ``rdomain'' and announce it on the routing socket.
 */
int
rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst,
    unsigned int rdomain)
{
	struct ifnet *ifp = ifa->ifa_ifp;
	struct rtentry *rt;
	struct sockaddr_rtlabel sa_rl;
	struct rt_addrinfo info;
	uint8_t prio = ifp->if_priority + RTP_STATIC;
	int error;

	KASSERT(rdomain == rtable_l2(rdomain));

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	if (flags & RTF_LLINFO)
		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl);
	else
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

#ifdef MPLS
	if ((flags & RTF_MPLS) == RTF_MPLS)
		info.rti_mpls = MPLS_OP_POP;
#endif /* MPLS */

1130 if ((flags & RTF_HOST) == 0) 1131 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; 1132 1133 if (flags & (RTF_LOCAL|RTF_BROADCAST)) 1134 prio = RTP_LOCAL; 1135 1136 if (flags & RTF_CONNECTED) 1137 prio = ifp->if_priority + RTP_CONNECTED; 1138 1139 error = rtrequest(RTM_ADD, &info, prio, &rt, rdomain); 1140 if (error == 0) { 1141 /* 1142 * A local route is created for every address configured 1143 * on an interface, so use this information to notify 1144 * userland that a new address has been added. 1145 */ 1146 if (flags & RTF_LOCAL) 1147 rtm_addr(RTM_NEWADDR, ifa); 1148 rtm_send(rt, RTM_ADD, 0, rdomain); 1149 rtfree(rt); 1150 } 1151 return (error); 1152 } 1153 1154 int 1155 rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst, 1156 unsigned int rdomain) 1157 { 1158 struct ifnet *ifp = ifa->ifa_ifp; 1159 struct rtentry *rt; 1160 struct mbuf *m = NULL; 1161 struct sockaddr *deldst; 1162 struct rt_addrinfo info; 1163 struct sockaddr_rtlabel sa_rl; 1164 uint8_t prio = ifp->if_priority + RTP_STATIC; 1165 int error; 1166 1167 KASSERT(rdomain == rtable_l2(rdomain)); 1168 1169 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) { 1170 m = m_get(M_DONTWAIT, MT_SONAME); 1171 if (m == NULL) 1172 return (ENOBUFS); 1173 deldst = mtod(m, struct sockaddr *); 1174 rt_maskedcopy(dst, deldst, ifa->ifa_netmask); 1175 dst = deldst; 1176 } 1177 1178 memset(&info, 0, sizeof(info)); 1179 info.rti_ifa = ifa; 1180 info.rti_flags = flags; 1181 info.rti_info[RTAX_DST] = dst; 1182 if ((flags & RTF_LLINFO) == 0) 1183 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; 1184 info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl); 1185 1186 if ((flags & RTF_HOST) == 0) 1187 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; 1188 1189 if (flags & (RTF_LOCAL|RTF_BROADCAST)) 1190 prio = RTP_LOCAL; 1191 1192 if (flags & RTF_CONNECTED) 1193 prio = ifp->if_priority + RTP_CONNECTED; 1194 1195 rtable_clearsource(rdomain, ifa->ifa_addr); 1196 error = rtrequest_delete(&info, prio, ifp, &rt, 
rdomain); 1197 if (error == 0) { 1198 rtm_send(rt, RTM_DELETE, 0, rdomain); 1199 if (flags & RTF_LOCAL) 1200 rtm_addr(RTM_DELADDR, ifa); 1201 rtfree(rt); 1202 } 1203 m_free(m); 1204 1205 return (error); 1206 } 1207 1208 /* 1209 * Add ifa's address as a local rtentry. 1210 */ 1211 int 1212 rt_ifa_addlocal(struct ifaddr *ifa) 1213 { 1214 struct ifnet *ifp = ifa->ifa_ifp; 1215 struct rtentry *rt; 1216 u_int flags = RTF_HOST|RTF_LOCAL; 1217 int error = 0; 1218 1219 /* 1220 * If the configured address correspond to the magical "any" 1221 * address do not add a local route entry because that might 1222 * corrupt the routing tree which uses this value for the 1223 * default routes. 1224 */ 1225 switch (ifa->ifa_addr->sa_family) { 1226 case AF_INET: 1227 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) 1228 return (0); 1229 break; 1230 #ifdef INET6 1231 case AF_INET6: 1232 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, 1233 &in6addr_any)) 1234 return (0); 1235 break; 1236 #endif 1237 default: 1238 break; 1239 } 1240 1241 if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) 1242 flags |= RTF_LLINFO; 1243 1244 /* If there is no local entry, allocate one. */ 1245 rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain); 1246 if (rt == NULL || ISSET(rt->rt_flags, flags) != flags) { 1247 error = rt_ifa_add(ifa, flags | RTF_MPATH, ifa->ifa_addr, 1248 ifp->if_rdomain); 1249 } 1250 rtfree(rt); 1251 1252 return (error); 1253 } 1254 1255 /* 1256 * Remove local rtentry of ifa's address if it exists. 1257 */ 1258 int 1259 rt_ifa_dellocal(struct ifaddr *ifa) 1260 { 1261 struct ifnet *ifp = ifa->ifa_ifp; 1262 struct rtentry *rt; 1263 u_int flags = RTF_HOST|RTF_LOCAL; 1264 int error = 0; 1265 1266 /* 1267 * We do not add local routes for such address, so do not bother 1268 * removing them. 
1269 */ 1270 switch (ifa->ifa_addr->sa_family) { 1271 case AF_INET: 1272 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) 1273 return (0); 1274 break; 1275 #ifdef INET6 1276 case AF_INET6: 1277 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, 1278 &in6addr_any)) 1279 return (0); 1280 break; 1281 #endif 1282 default: 1283 break; 1284 } 1285 1286 if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) 1287 flags |= RTF_LLINFO; 1288 1289 /* 1290 * Before deleting, check if a corresponding local host 1291 * route surely exists. With this check, we can avoid to 1292 * delete an interface direct route whose destination is same 1293 * as the address being removed. This can happen when removing 1294 * a subnet-router anycast address on an interface attached 1295 * to a shared medium. 1296 */ 1297 rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain); 1298 if (rt != NULL && ISSET(rt->rt_flags, flags) == flags) { 1299 error = rt_ifa_del(ifa, flags, ifa->ifa_addr, 1300 ifp->if_rdomain); 1301 } 1302 rtfree(rt); 1303 1304 return (error); 1305 } 1306 1307 /* 1308 * Remove all addresses attached to ``ifa''. 
1309 */ 1310 void 1311 rt_ifa_purge(struct ifaddr *ifa) 1312 { 1313 struct ifnet *ifp = ifa->ifa_ifp; 1314 struct rtentry *rt = NULL; 1315 unsigned int rtableid; 1316 int error, af = ifa->ifa_addr->sa_family; 1317 1318 KASSERT(ifp != NULL); 1319 1320 for (rtableid = 0; rtableid < rtmap_limit; rtableid++) { 1321 /* skip rtables that are not in the rdomain of the ifp */ 1322 if (rtable_l2(rtableid) != ifp->if_rdomain) 1323 continue; 1324 1325 do { 1326 error = rtable_walk(rtableid, af, &rt, 1327 rt_ifa_purge_walker, ifa); 1328 if (rt != NULL && error == EEXIST) { 1329 error = rtdeletemsg(rt, ifp, rtableid); 1330 if (error == 0) 1331 error = EAGAIN; 1332 } 1333 rtfree(rt); 1334 rt = NULL; 1335 } while (error == EAGAIN); 1336 1337 if (error == EAFNOSUPPORT) 1338 error = 0; 1339 1340 if (error) 1341 break; 1342 } 1343 } 1344 1345 int 1346 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid) 1347 { 1348 struct ifaddr *ifa = vifa; 1349 1350 if (rt->rt_ifa == ifa) 1351 return EEXIST; 1352 1353 return 0; 1354 } 1355 1356 /* 1357 * Route timer routines. These routes allow functions to be called 1358 * for various routes at any time. This is useful in supporting 1359 * path MTU discovery and redirect route deletion. 1360 * 1361 * This is similar to some BSDI internal functions, but it provides 1362 * for multiple queues for efficiency's sake... 1363 */ 1364 1365 LIST_HEAD(, rttimer_queue) rttimer_queue_head; 1366 static int rt_init_done = 0; 1367 1368 #define RTTIMER_CALLOUT(r) { \ 1369 if (r->rtt_func != NULL) { \ 1370 (*r->rtt_func)(r->rtt_rt, r); \ 1371 } else { \ 1372 struct ifnet *ifp; \ 1373 \ 1374 ifp = if_get(r->rtt_rt->rt_ifidx); \ 1375 if (ifp != NULL) \ 1376 rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid); \ 1377 if_put(ifp); \ 1378 } \ 1379 } 1380 1381 /* 1382 * Some subtle order problems with domain initialization mean that 1383 * we cannot count on this being run from rt_init before various 1384 * protocol initializations are done. 
Therefore, we make sure 1385 * that this is run when the first queue is added... 1386 */ 1387 1388 void 1389 rt_timer_init(void) 1390 { 1391 static struct timeout rt_timer_timeout; 1392 1393 if (rt_init_done) 1394 panic("rt_timer_init: already initialized"); 1395 1396 pool_init(&rttimer_pool, sizeof(struct rttimer), 0, IPL_SOFTNET, 0, 1397 "rttmr", NULL); 1398 1399 LIST_INIT(&rttimer_queue_head); 1400 timeout_set_proc(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout); 1401 timeout_add_sec(&rt_timer_timeout, 1); 1402 rt_init_done = 1; 1403 } 1404 1405 struct rttimer_queue * 1406 rt_timer_queue_create(u_int timeout) 1407 { 1408 struct rttimer_queue *rtq; 1409 1410 if (rt_init_done == 0) 1411 rt_timer_init(); 1412 1413 if ((rtq = malloc(sizeof(*rtq), M_RTABLE, M_NOWAIT|M_ZERO)) == NULL) 1414 return (NULL); 1415 1416 rtq->rtq_timeout = timeout; 1417 rtq->rtq_count = 0; 1418 TAILQ_INIT(&rtq->rtq_head); 1419 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link); 1420 1421 return (rtq); 1422 } 1423 1424 void 1425 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout) 1426 { 1427 rtq->rtq_timeout = timeout; 1428 } 1429 1430 void 1431 rt_timer_queue_destroy(struct rttimer_queue *rtq) 1432 { 1433 struct rttimer *r; 1434 1435 NET_ASSERT_LOCKED(); 1436 1437 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) { 1438 LIST_REMOVE(r, rtt_link); 1439 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); 1440 RTTIMER_CALLOUT(r); 1441 pool_put(&rttimer_pool, r); 1442 if (rtq->rtq_count > 0) 1443 rtq->rtq_count--; 1444 else 1445 printf("rt_timer_queue_destroy: rtq_count reached 0\n"); 1446 } 1447 1448 LIST_REMOVE(rtq, rtq_link); 1449 free(rtq, M_RTABLE, sizeof(*rtq)); 1450 } 1451 1452 unsigned long 1453 rt_timer_queue_count(struct rttimer_queue *rtq) 1454 { 1455 return (rtq->rtq_count); 1456 } 1457 1458 void 1459 rt_timer_remove_all(struct rtentry *rt) 1460 { 1461 struct rttimer *r; 1462 1463 while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) { 1464 LIST_REMOVE(r, rtt_link); 1465 
TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1466 if (r->rtt_queue->rtq_count > 0) 1467 r->rtt_queue->rtq_count--; 1468 else 1469 printf("rt_timer_remove_all: rtq_count reached 0\n"); 1470 pool_put(&rttimer_pool, r); 1471 } 1472 } 1473 1474 int 1475 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *, 1476 struct rttimer *), struct rttimer_queue *queue, u_int rtableid) 1477 { 1478 struct rttimer *r; 1479 long current_time; 1480 1481 current_time = getuptime(); 1482 rt->rt_expire = getuptime() + queue->rtq_timeout; 1483 1484 /* 1485 * If there's already a timer with this action, destroy it before 1486 * we add a new one. 1487 */ 1488 LIST_FOREACH(r, &rt->rt_timer, rtt_link) { 1489 if (r->rtt_func == func) { 1490 LIST_REMOVE(r, rtt_link); 1491 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); 1492 if (r->rtt_queue->rtq_count > 0) 1493 r->rtt_queue->rtq_count--; 1494 else 1495 printf("rt_timer_add: rtq_count reached 0\n"); 1496 pool_put(&rttimer_pool, r); 1497 break; /* only one per list, so we can quit... 
*/ 1498 } 1499 } 1500 1501 r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO); 1502 if (r == NULL) 1503 return (ENOBUFS); 1504 1505 r->rtt_rt = rt; 1506 r->rtt_time = current_time; 1507 r->rtt_func = func; 1508 r->rtt_queue = queue; 1509 r->rtt_tableid = rtableid; 1510 LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link); 1511 TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next); 1512 r->rtt_queue->rtq_count++; 1513 1514 return (0); 1515 } 1516 1517 void 1518 rt_timer_timer(void *arg) 1519 { 1520 struct timeout *to = (struct timeout *)arg; 1521 struct rttimer_queue *rtq; 1522 struct rttimer *r; 1523 long current_time; 1524 1525 current_time = getuptime(); 1526 1527 NET_LOCK(); 1528 LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) { 1529 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL && 1530 (r->rtt_time + rtq->rtq_timeout) < current_time) { 1531 LIST_REMOVE(r, rtt_link); 1532 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); 1533 RTTIMER_CALLOUT(r); 1534 pool_put(&rttimer_pool, r); 1535 if (rtq->rtq_count > 0) 1536 rtq->rtq_count--; 1537 else 1538 printf("rt_timer_timer: rtq_count reached 0\n"); 1539 } 1540 } 1541 NET_UNLOCK(); 1542 1543 timeout_add_sec(to, 1); 1544 } 1545 1546 #ifdef MPLS 1547 int 1548 rt_mpls_set(struct rtentry *rt, struct sockaddr *src, uint8_t op) 1549 { 1550 struct sockaddr_mpls *psa_mpls = (struct sockaddr_mpls *)src; 1551 struct rt_mpls *rt_mpls; 1552 1553 if (psa_mpls == NULL && op != MPLS_OP_POP) 1554 return (EOPNOTSUPP); 1555 if (psa_mpls != NULL && psa_mpls->smpls_len != sizeof(*psa_mpls)) 1556 return (EINVAL); 1557 if (psa_mpls != NULL && psa_mpls->smpls_family != AF_MPLS) 1558 return (EAFNOSUPPORT); 1559 1560 rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT|M_ZERO); 1561 if (rt->rt_llinfo == NULL) 1562 return (ENOMEM); 1563 1564 rt_mpls = (struct rt_mpls *)rt->rt_llinfo; 1565 if (psa_mpls != NULL) 1566 rt_mpls->mpls_label = psa_mpls->smpls_label; 1567 rt_mpls->mpls_operation = op; 1568 /* XXX: set experimental bits */ 1569 rt->rt_flags |= 
RTF_MPLS; 1570 1571 return (0); 1572 } 1573 1574 void 1575 rt_mpls_clear(struct rtentry *rt) 1576 { 1577 if (rt->rt_llinfo != NULL && rt->rt_flags & RTF_MPLS) { 1578 free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls)); 1579 rt->rt_llinfo = NULL; 1580 } 1581 rt->rt_flags &= ~RTF_MPLS; 1582 } 1583 #endif 1584 1585 u_int16_t 1586 rtlabel_name2id(char *name) 1587 { 1588 struct rt_label *label, *p; 1589 u_int16_t new_id = 1; 1590 1591 if (!name[0]) 1592 return (0); 1593 1594 TAILQ_FOREACH(label, &rt_labels, rtl_entry) 1595 if (strcmp(name, label->rtl_name) == 0) { 1596 label->rtl_ref++; 1597 return (label->rtl_id); 1598 } 1599 1600 /* 1601 * to avoid fragmentation, we do a linear search from the beginning 1602 * and take the first free slot we find. if there is none or the list 1603 * is empty, append a new entry at the end. 1604 */ 1605 TAILQ_FOREACH(p, &rt_labels, rtl_entry) { 1606 if (p->rtl_id != new_id) 1607 break; 1608 new_id = p->rtl_id + 1; 1609 } 1610 if (new_id > LABELID_MAX) 1611 return (0); 1612 1613 label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO); 1614 if (label == NULL) 1615 return (0); 1616 strlcpy(label->rtl_name, name, sizeof(label->rtl_name)); 1617 label->rtl_id = new_id; 1618 label->rtl_ref++; 1619 1620 if (p != NULL) /* insert new entry before p */ 1621 TAILQ_INSERT_BEFORE(p, label, rtl_entry); 1622 else /* either list empty or no free slot in between */ 1623 TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry); 1624 1625 return (label->rtl_id); 1626 } 1627 1628 const char * 1629 rtlabel_id2name(u_int16_t id) 1630 { 1631 struct rt_label *label; 1632 1633 TAILQ_FOREACH(label, &rt_labels, rtl_entry) 1634 if (label->rtl_id == id) 1635 return (label->rtl_name); 1636 1637 return (NULL); 1638 } 1639 1640 struct sockaddr * 1641 rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl) 1642 { 1643 const char *label; 1644 1645 if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL) 1646 return (NULL); 1647 1648 bzero(sa_rl, 
sizeof(*sa_rl)); 1649 sa_rl->sr_len = sizeof(*sa_rl); 1650 sa_rl->sr_family = AF_UNSPEC; 1651 strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label)); 1652 1653 return ((struct sockaddr *)sa_rl); 1654 } 1655 1656 void 1657 rtlabel_unref(u_int16_t id) 1658 { 1659 struct rt_label *p, *next; 1660 1661 if (id == 0) 1662 return; 1663 1664 TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) { 1665 if (id == p->rtl_id) { 1666 if (--p->rtl_ref == 0) { 1667 TAILQ_REMOVE(&rt_labels, p, rtl_entry); 1668 free(p, M_RTABLE, sizeof(*p)); 1669 } 1670 break; 1671 } 1672 } 1673 } 1674 1675 int 1676 rt_if_track(struct ifnet *ifp) 1677 { 1678 unsigned int rtableid; 1679 struct rtentry *rt = NULL; 1680 int i, error = 0; 1681 1682 for (rtableid = 0; rtableid < rtmap_limit; rtableid++) { 1683 /* skip rtables that are not in the rdomain of the ifp */ 1684 if (rtable_l2(rtableid) != ifp->if_rdomain) 1685 continue; 1686 for (i = 1; i <= AF_MAX; i++) { 1687 if (!rtable_mpath_capable(rtableid, i)) 1688 continue; 1689 1690 do { 1691 error = rtable_walk(rtableid, i, &rt, 1692 rt_if_linkstate_change, ifp); 1693 if (rt != NULL && error == EEXIST) { 1694 error = rtdeletemsg(rt, ifp, rtableid); 1695 if (error == 0) 1696 error = EAGAIN; 1697 } 1698 rtfree(rt); 1699 rt = NULL; 1700 } while (error == EAGAIN); 1701 1702 if (error == EAFNOSUPPORT) 1703 error = 0; 1704 1705 if (error) 1706 break; 1707 } 1708 } 1709 1710 return (error); 1711 } 1712 1713 int 1714 rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id) 1715 { 1716 struct ifnet *ifp = arg; 1717 struct sockaddr_in6 sa_mask; 1718 int error; 1719 1720 if (rt->rt_ifidx != ifp->if_index) 1721 return (0); 1722 1723 /* Local routes are always usable. 
*/ 1724 if (rt->rt_flags & RTF_LOCAL) { 1725 rt->rt_flags |= RTF_UP; 1726 return (0); 1727 } 1728 1729 if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) { 1730 if (ISSET(rt->rt_flags, RTF_UP)) 1731 return (0); 1732 1733 /* bring route up */ 1734 rt->rt_flags |= RTF_UP; 1735 error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt), 1736 rt->rt_priority & RTP_MASK, rt); 1737 } else { 1738 /* 1739 * Remove redirected and cloned routes (mainly ARP) 1740 * from down interfaces so we have a chance to get 1741 * new routes from a better source. 1742 */ 1743 if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) && 1744 !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) { 1745 return (EEXIST); 1746 } 1747 1748 if (!ISSET(rt->rt_flags, RTF_UP)) 1749 return (0); 1750 1751 /* take route down */ 1752 rt->rt_flags &= ~RTF_UP; 1753 error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt), 1754 rt->rt_priority | RTP_DOWN, rt); 1755 } 1756 if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask)); 1757 1758 return (error); 1759 } 1760 1761 struct sockaddr * 1762 rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask) 1763 { 1764 struct sockaddr_in *sin = (struct sockaddr_in *)sa_mask; 1765 #ifdef INET6 1766 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa_mask; 1767 #endif 1768 1769 KASSERT(plen >= 0 || plen == -1); 1770 1771 if (plen == -1) 1772 return (NULL); 1773 1774 memset(sa_mask, 0, sizeof(*sa_mask)); 1775 1776 switch (af) { 1777 case AF_INET: 1778 sin->sin_family = AF_INET; 1779 sin->sin_len = sizeof(struct sockaddr_in); 1780 in_prefixlen2mask(&sin->sin_addr, plen); 1781 break; 1782 #ifdef INET6 1783 case AF_INET6: 1784 sin6->sin6_family = AF_INET6; 1785 sin6->sin6_len = sizeof(struct sockaddr_in6); 1786 in6_prefixlen2mask(&sin6->sin6_addr, plen); 1787 break; 1788 #endif /* INET6 */ 1789 default: 1790 return (NULL); 1791 } 1792 1793 return ((struct sockaddr *)sa_mask); 1794 } 1795 1796 struct sockaddr * 1797 rt_plen2mask(struct rtentry *rt, struct 
sockaddr_in6 *sa_mask) 1798 { 1799 return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask)); 1800 } 1801 1802 #ifdef DDB 1803 #include <machine/db_machdep.h> 1804 #include <ddb/db_output.h> 1805 1806 void 1807 db_print_sa(struct sockaddr *sa) 1808 { 1809 int len; 1810 u_char *p; 1811 1812 if (sa == NULL) { 1813 db_printf("[NULL]"); 1814 return; 1815 } 1816 1817 p = (u_char *)sa; 1818 len = sa->sa_len; 1819 db_printf("["); 1820 while (len > 0) { 1821 db_printf("%d", *p); 1822 p++; 1823 len--; 1824 if (len) 1825 db_printf(","); 1826 } 1827 db_printf("]\n"); 1828 } 1829 1830 void 1831 db_print_ifa(struct ifaddr *ifa) 1832 { 1833 if (ifa == NULL) 1834 return; 1835 db_printf(" ifa_addr="); 1836 db_print_sa(ifa->ifa_addr); 1837 db_printf(" ifa_dsta="); 1838 db_print_sa(ifa->ifa_dstaddr); 1839 db_printf(" ifa_mask="); 1840 db_print_sa(ifa->ifa_netmask); 1841 db_printf(" flags=0x%x, refcnt=%d, metric=%d\n", 1842 ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric); 1843 } 1844 1845 /* 1846 * Function to pass to rtalble_walk(). 1847 * Return non-zero error to abort walk. 1848 */ 1849 int 1850 db_show_rtentry(struct rtentry *rt, void *w, unsigned int id) 1851 { 1852 db_printf("rtentry=%p", rt); 1853 1854 db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n", 1855 rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id); 1856 1857 db_printf(" key="); db_print_sa(rt_key(rt)); 1858 db_printf(" plen=%d", rt_plen(rt)); 1859 db_printf(" gw="); db_print_sa(rt->rt_gateway); 1860 db_printf(" ifidx=%u ", rt->rt_ifidx); 1861 db_printf(" ifa=%p\n", rt->rt_ifa); 1862 db_print_ifa(rt->rt_ifa); 1863 1864 db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo); 1865 return (0); 1866 } 1867 1868 /* 1869 * Function to print all the route trees. 
1870 * Use this from ddb: "call db_show_arptab" 1871 */ 1872 int 1873 db_show_arptab(void) 1874 { 1875 db_printf("Route tree for AF_INET\n"); 1876 rtable_walk(0, AF_INET, NULL, db_show_rtentry, NULL); 1877 return (0); 1878 } 1879 #endif /* DDB */ 1880