/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
60 * 61 * @(#)rtsock.c 8.7 (Berkeley) 10/12/95 62 * $FreeBSD: src/sys/net/rtsock.c,v 1.44.2.11 2002/12/04 14:05:41 ru Exp $ 63 */ 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/kernel.h> 68 #include <sys/sysctl.h> 69 #include <sys/proc.h> 70 #include <sys/priv.h> 71 #include <sys/malloc.h> 72 #include <sys/mbuf.h> 73 #include <sys/protosw.h> 74 #include <sys/socket.h> 75 #include <sys/socketvar.h> 76 #include <sys/domain.h> 77 #include <sys/jail.h> 78 79 #include <sys/thread2.h> 80 #include <sys/socketvar2.h> 81 82 #include <net/if.h> 83 #include <net/if_var.h> 84 #include <net/route.h> 85 #include <net/raw_cb.h> 86 #include <net/netmsg2.h> 87 #include <net/netisr2.h> 88 89 MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables"); 90 91 static struct route_cb { 92 int ip_count; 93 int ip6_count; 94 int ns_count; 95 int any_count; 96 } route_cb; 97 98 static const struct sockaddr route_src = { 2, PF_ROUTE, }; 99 100 struct walkarg { 101 int w_tmemsize; 102 int w_op, w_arg; 103 void *w_tmem; 104 struct sysctl_req *w_req; 105 }; 106 107 #ifndef RTTABLE_DUMP_MSGCNT_MAX 108 /* Should be large enough for dupkeys */ 109 #define RTTABLE_DUMP_MSGCNT_MAX 64 110 #endif 111 112 struct rttable_walkarg { 113 int w_op; 114 int w_arg; 115 int w_bufsz; 116 void *w_buf; 117 118 int w_buflen; 119 120 const char *w_key; 121 const char *w_mask; 122 123 struct sockaddr_storage w_key0; 124 struct sockaddr_storage w_mask0; 125 }; 126 127 struct netmsg_rttable_walk { 128 struct netmsg_base base; 129 int af; 130 struct rttable_walkarg *w; 131 }; 132 133 struct routecb { 134 struct rawcb rocb_rcb; 135 unsigned int rocb_msgfilter; 136 }; 137 #define sotoroutecb(so) ((struct routecb *)(so)->so_pcb) 138 139 static struct mbuf * 140 rt_msg_mbuf (int, struct rt_addrinfo *); 141 static void rt_msg_buffer (int, struct rt_addrinfo *, void *buf, int len); 142 static int rt_msgsize(int type, const struct rt_addrinfo *rtinfo); 143 static int rt_xaddrs (char *, char *, struct 
rt_addrinfo *); 144 static int sysctl_rttable(int af, struct sysctl_req *req, int op, int arg); 145 static int if_addrflags(const struct ifaddr *ifa); 146 static int sysctl_iflist (int af, struct walkarg *w); 147 static int route_output(struct mbuf *, struct socket *, ...); 148 static void rt_setmetrics (u_long, struct rt_metrics *, 149 struct rt_metrics *); 150 151 /* 152 * It really doesn't make any sense at all for this code to share much 153 * with raw_usrreq.c, since its functionality is so restricted. XXX 154 */ 155 static void 156 rts_abort(netmsg_t msg) 157 { 158 crit_enter(); 159 raw_usrreqs.pru_abort(msg); 160 /* msg invalid now */ 161 crit_exit(); 162 } 163 164 static int 165 rts_filter(struct mbuf *m, const struct sockproto *proto, 166 const struct rawcb *rp) 167 { 168 const struct routecb *rop = (const struct routecb *)rp; 169 const struct rt_msghdr *rtm; 170 171 KKASSERT(m != NULL); 172 KKASSERT(proto != NULL); 173 KKASSERT(rp != NULL); 174 175 /* Wrong family for this socket. */ 176 if (proto->sp_family != PF_ROUTE) 177 return ENOPROTOOPT; 178 179 /* If no filter set, just return. */ 180 if (rop->rocb_msgfilter == 0) 181 return 0; 182 183 /* Ensure we can access rtm_type */ 184 if (m->m_len < 185 offsetof(struct rt_msghdr, rtm_type) + sizeof(rtm->rtm_type)) 186 return EINVAL; 187 188 rtm = mtod(m, const struct rt_msghdr *); 189 /* If the rtm type is filtered out, return a positive. */ 190 if (!(rop->rocb_msgfilter & ROUTE_FILTER(rtm->rtm_type))) 191 return EEXIST; 192 193 /* Passed the filter. 
*/ 194 return 0; 195 } 196 197 198 /* pru_accept is EOPNOTSUPP */ 199 200 static void 201 rts_attach(netmsg_t msg) 202 { 203 struct socket *so = msg->base.nm_so; 204 struct pru_attach_info *ai = msg->attach.nm_ai; 205 struct rawcb *rp; 206 struct routecb *rop; 207 int proto = msg->attach.nm_proto; 208 int error; 209 210 crit_enter(); 211 if (sotorawcb(so) != NULL) { 212 error = EISCONN; 213 goto done; 214 } 215 216 rop = kmalloc(sizeof *rop, M_PCB, M_WAITOK | M_ZERO); 217 rp = &rop->rocb_rcb; 218 219 /* 220 * The critical section is necessary to block protocols from sending 221 * error notifications (like RTM_REDIRECT or RTM_LOSING) while 222 * this PCB is extant but incompletely initialized. 223 * Probably we should try to do more of this work beforehand and 224 * eliminate the critical section. 225 */ 226 so->so_pcb = rp; 227 soreference(so); /* so_pcb assignment */ 228 error = raw_attach(so, proto, ai->sb_rlimit); 229 rp = sotorawcb(so); 230 if (error) { 231 kfree(rop, M_PCB); 232 goto done; 233 } 234 switch(rp->rcb_proto.sp_protocol) { 235 case AF_INET: 236 route_cb.ip_count++; 237 break; 238 case AF_INET6: 239 route_cb.ip6_count++; 240 break; 241 } 242 rp->rcb_faddr = &route_src; 243 rp->rcb_filter = rts_filter; 244 route_cb.any_count++; 245 soisconnected(so); 246 so->so_options |= SO_USELOOPBACK; 247 error = 0; 248 done: 249 crit_exit(); 250 lwkt_replymsg(&msg->lmsg, error); 251 } 252 253 static void 254 rts_bind(netmsg_t msg) 255 { 256 crit_enter(); 257 raw_usrreqs.pru_bind(msg); /* xxx just EINVAL */ 258 /* msg invalid now */ 259 crit_exit(); 260 } 261 262 static void 263 rts_connect(netmsg_t msg) 264 { 265 crit_enter(); 266 raw_usrreqs.pru_connect(msg); /* XXX just EINVAL */ 267 /* msg invalid now */ 268 crit_exit(); 269 } 270 271 /* pru_connect2 is EOPNOTSUPP */ 272 /* pru_control is EOPNOTSUPP */ 273 274 static void 275 rts_detach(netmsg_t msg) 276 { 277 struct socket *so = msg->base.nm_so; 278 struct rawcb *rp = sotorawcb(so); 279 280 crit_enter(); 281 
if (rp != NULL) { 282 switch(rp->rcb_proto.sp_protocol) { 283 case AF_INET: 284 route_cb.ip_count--; 285 break; 286 case AF_INET6: 287 route_cb.ip6_count--; 288 break; 289 } 290 route_cb.any_count--; 291 } 292 raw_usrreqs.pru_detach(msg); 293 /* msg invalid now */ 294 crit_exit(); 295 } 296 297 static void 298 rts_disconnect(netmsg_t msg) 299 { 300 crit_enter(); 301 raw_usrreqs.pru_disconnect(msg); 302 /* msg invalid now */ 303 crit_exit(); 304 } 305 306 /* pru_listen is EOPNOTSUPP */ 307 308 static void 309 rts_peeraddr(netmsg_t msg) 310 { 311 crit_enter(); 312 raw_usrreqs.pru_peeraddr(msg); 313 /* msg invalid now */ 314 crit_exit(); 315 } 316 317 /* pru_rcvd is EOPNOTSUPP */ 318 /* pru_rcvoob is EOPNOTSUPP */ 319 320 static void 321 rts_send(netmsg_t msg) 322 { 323 crit_enter(); 324 raw_usrreqs.pru_send(msg); 325 /* msg invalid now */ 326 crit_exit(); 327 } 328 329 /* pru_sense is null */ 330 331 static void 332 rts_shutdown(netmsg_t msg) 333 { 334 crit_enter(); 335 raw_usrreqs.pru_shutdown(msg); 336 /* msg invalid now */ 337 crit_exit(); 338 } 339 340 static void 341 rts_sockaddr(netmsg_t msg) 342 { 343 crit_enter(); 344 raw_usrreqs.pru_sockaddr(msg); 345 /* msg invalid now */ 346 crit_exit(); 347 } 348 349 static struct pr_usrreqs route_usrreqs = { 350 .pru_abort = rts_abort, 351 .pru_accept = pr_generic_notsupp, 352 .pru_attach = rts_attach, 353 .pru_bind = rts_bind, 354 .pru_connect = rts_connect, 355 .pru_connect2 = pr_generic_notsupp, 356 .pru_control = pr_generic_notsupp, 357 .pru_detach = rts_detach, 358 .pru_disconnect = rts_disconnect, 359 .pru_listen = pr_generic_notsupp, 360 .pru_peeraddr = rts_peeraddr, 361 .pru_rcvd = pr_generic_notsupp, 362 .pru_rcvoob = pr_generic_notsupp, 363 .pru_send = rts_send, 364 .pru_sense = pru_sense_null, 365 .pru_shutdown = rts_shutdown, 366 .pru_sockaddr = rts_sockaddr, 367 .pru_sosend = sosend, 368 .pru_soreceive = soreceive 369 }; 370 371 static __inline sa_family_t 372 familyof(struct sockaddr *sa) 373 { 374 return 
(sa != NULL ? sa->sa_family : 0); 375 } 376 377 /* 378 * Routing socket input function. The packet must be serialized onto cpu 0. 379 * We use the cpu0_soport() netisr processing loop to handle it. 380 * 381 * This looks messy but it means that anyone, including interrupt code, 382 * can send a message to the routing socket. 383 */ 384 static void 385 rts_input_handler(netmsg_t msg) 386 { 387 static const struct sockaddr route_dst = { 2, PF_ROUTE, }; 388 struct sockproto route_proto; 389 struct netmsg_packet *pmsg = &msg->packet; 390 struct mbuf *m; 391 sa_family_t family; 392 struct rawcb *skip; 393 394 family = pmsg->base.lmsg.u.ms_result; 395 route_proto.sp_family = PF_ROUTE; 396 route_proto.sp_protocol = family; 397 398 m = pmsg->nm_packet; 399 M_ASSERTPKTHDR(m); 400 401 skip = m->m_pkthdr.header; 402 m->m_pkthdr.header = NULL; 403 404 raw_input(m, &route_proto, &route_src, &route_dst, skip); 405 } 406 407 static void 408 rts_input_skip(struct mbuf *m, sa_family_t family, struct rawcb *skip) 409 { 410 struct netmsg_packet *pmsg; 411 lwkt_port_t port; 412 413 M_ASSERTPKTHDR(m); 414 415 port = netisr_cpuport(0); /* XXX same as for routing socket */ 416 pmsg = &m->m_hdr.mh_netmsg; 417 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport, 418 0, rts_input_handler); 419 pmsg->nm_packet = m; 420 pmsg->base.lmsg.u.ms_result = family; 421 m->m_pkthdr.header = skip; /* XXX steal field in pkthdr */ 422 lwkt_sendmsg(port, &pmsg->base.lmsg); 423 } 424 425 static __inline void 426 rts_input(struct mbuf *m, sa_family_t family) 427 { 428 rts_input_skip(m, family, NULL); 429 } 430 431 static void 432 route_ctloutput(netmsg_t msg) 433 { 434 struct socket *so = msg->ctloutput.base.nm_so; 435 struct sockopt *sopt = msg->ctloutput.nm_sopt; 436 struct routecb *rop = sotoroutecb(so); 437 int error; 438 unsigned int msgfilter; 439 440 if (sopt->sopt_level != AF_ROUTE) { 441 error = EINVAL; 442 goto out; 443 } 444 445 error = 0; 446 447 switch (sopt->sopt_dir) { 448 case SOPT_SET: 449 
switch (sopt->sopt_name) { 450 case ROUTE_MSGFILTER: 451 error = soopt_to_kbuf(sopt, &msgfilter, 452 sizeof(msgfilter), sizeof(msgfilter)); 453 if (error == 0) 454 rop->rocb_msgfilter = msgfilter; 455 break; 456 default: 457 error = ENOPROTOOPT; 458 break; 459 } 460 break; 461 case SOPT_GET: 462 switch (sopt->sopt_name) { 463 case ROUTE_MSGFILTER: 464 msgfilter = rop->rocb_msgfilter; 465 soopt_from_kbuf(sopt, &msgfilter, sizeof(msgfilter)); 466 break; 467 default: 468 error = ENOPROTOOPT; 469 break; 470 } 471 } 472 out: 473 lwkt_replymsg(&msg->ctloutput.base.lmsg, error); 474 } 475 476 477 478 static void * 479 reallocbuf_nofree(void *ptr, size_t len, size_t olen) 480 { 481 void *newptr; 482 483 newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK); 484 if (newptr == NULL) 485 return NULL; 486 bcopy(ptr, newptr, olen); 487 return (newptr); 488 } 489 490 /* 491 * Internal helper routine for route_output(). 492 */ 493 static int 494 _fillrtmsg(struct rt_msghdr **prtm, struct rtentry *rt, 495 struct rt_addrinfo *rtinfo) 496 { 497 int msglen; 498 struct rt_msghdr *rtm = *prtm; 499 500 /* Fill in rt_addrinfo for call to rt_msg_buffer(). 
*/ 501 rtinfo->rti_dst = rt_key(rt); 502 rtinfo->rti_gateway = rt->rt_gateway; 503 rtinfo->rti_netmask = rt_mask(rt); /* might be NULL */ 504 rtinfo->rti_genmask = rt->rt_genmask; /* might be NULL */ 505 if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) { 506 if (rt->rt_ifp != NULL) { 507 rtinfo->rti_ifpaddr = 508 TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid]) 509 ->ifa->ifa_addr; 510 rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr; 511 if (rt->rt_ifp->if_flags & IFF_POINTOPOINT) 512 rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr; 513 rtm->rtm_index = rt->rt_ifp->if_index; 514 } else { 515 rtinfo->rti_ifpaddr = NULL; 516 rtinfo->rti_ifaaddr = NULL; 517 } 518 } else if (rt->rt_ifp != NULL) { 519 rtm->rtm_index = rt->rt_ifp->if_index; 520 } 521 522 msglen = rt_msgsize(rtm->rtm_type, rtinfo); 523 if (rtm->rtm_msglen < msglen) { 524 /* NOTE: Caller will free the old rtm accordingly */ 525 rtm = reallocbuf_nofree(rtm, msglen, rtm->rtm_msglen); 526 if (rtm == NULL) 527 return (ENOBUFS); 528 *prtm = rtm; 529 } 530 rt_msg_buffer(rtm->rtm_type, rtinfo, rtm, msglen); 531 532 rtm->rtm_flags = rt->rt_flags; 533 rtm->rtm_rmx = rt->rt_rmx; 534 rtm->rtm_addrs = rtinfo->rti_addrs; 535 536 return (0); 537 } 538 539 struct rtm_arg { 540 struct rt_msghdr *bak_rtm; 541 struct rt_msghdr *new_rtm; 542 }; 543 544 static int 545 fillrtmsg(struct rtm_arg *arg, struct rtentry *rt, 546 struct rt_addrinfo *rtinfo) 547 { 548 struct rt_msghdr *rtm = arg->new_rtm; 549 int error; 550 551 error = _fillrtmsg(&rtm, rt, rtinfo); 552 if (!error) { 553 if (arg->new_rtm != rtm) { 554 /* 555 * _fillrtmsg() just allocated a new rtm; 556 * if the previously allocated rtm is not 557 * the backing rtm, it should be freed. 
558 */ 559 if (arg->new_rtm != arg->bak_rtm) 560 kfree(arg->new_rtm, M_RTABLE); 561 arg->new_rtm = rtm; 562 } 563 } 564 return error; 565 } 566 567 static void route_output_add_callback(int, int, struct rt_addrinfo *, 568 struct rtentry *, void *); 569 static void route_output_delete_callback(int, int, struct rt_addrinfo *, 570 struct rtentry *, void *); 571 static int route_output_get_callback(int, struct rt_addrinfo *, 572 struct rtentry *, void *, int); 573 static int route_output_change_callback(int, struct rt_addrinfo *, 574 struct rtentry *, void *, int); 575 static int route_output_lock_callback(int, struct rt_addrinfo *, 576 struct rtentry *, void *, int); 577 578 /*ARGSUSED*/ 579 static int 580 route_output(struct mbuf *m, struct socket *so, ...) 581 { 582 struct rtm_arg arg; 583 struct rt_msghdr *rtm = NULL; 584 struct rawcb *rp = NULL; 585 struct pr_output_info *oi; 586 struct rt_addrinfo rtinfo; 587 sa_family_t family; 588 int len, error = 0; 589 __va_list ap; 590 591 M_ASSERTPKTHDR(m); 592 593 __va_start(ap, so); 594 oi = __va_arg(ap, struct pr_output_info *); 595 __va_end(ap); 596 597 family = familyof(NULL); 598 599 #define gotoerr(e) { error = e; goto flush;} 600 601 if (m == NULL || 602 (m->m_len < sizeof(long) && 603 (m = m_pullup(m, sizeof(long))) == NULL)) 604 return (ENOBUFS); 605 len = m->m_pkthdr.len; 606 if (len < sizeof(struct rt_msghdr) || 607 len != mtod(m, struct rt_msghdr *)->rtm_msglen) 608 gotoerr(EINVAL); 609 610 rtm = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK); 611 if (rtm == NULL) 612 gotoerr(ENOBUFS); 613 614 m_copydata(m, 0, len, (caddr_t)rtm); 615 if (rtm->rtm_version != RTM_VERSION) 616 gotoerr(EPROTONOSUPPORT); 617 618 rtm->rtm_pid = oi->p_pid; 619 bzero(&rtinfo, sizeof(struct rt_addrinfo)); 620 rtinfo.rti_addrs = rtm->rtm_addrs; 621 if (rt_xaddrs((char *)(rtm + 1), (char *)rtm + len, &rtinfo) != 0) 622 gotoerr(EINVAL); 623 624 rtinfo.rti_flags = rtm->rtm_flags; 625 if (rtinfo.rti_dst == NULL || rtinfo.rti_dst->sa_family >= 
AF_MAX || 626 (rtinfo.rti_gateway && rtinfo.rti_gateway->sa_family >= AF_MAX)) 627 gotoerr(EINVAL); 628 629 family = familyof(rtinfo.rti_dst); 630 631 /* 632 * Verify that the caller has the appropriate privilege; RTM_GET 633 * is the only operation the non-superuser is allowed. 634 */ 635 if (rtm->rtm_type != RTM_GET && 636 priv_check_cred(so->so_cred, PRIV_ROOT, 0) != 0) 637 gotoerr(EPERM); 638 639 if (rtinfo.rti_genmask != NULL) { 640 error = rtmask_add_global(rtinfo.rti_genmask, 641 rtm->rtm_type != RTM_GET ? 642 RTREQ_PRIO_HIGH : RTREQ_PRIO_NORM); 643 if (error) 644 goto flush; 645 } 646 647 switch (rtm->rtm_type) { 648 case RTM_ADD: 649 if (rtinfo.rti_gateway == NULL) { 650 error = EINVAL; 651 } else { 652 error = rtrequest1_global(RTM_ADD, &rtinfo, 653 route_output_add_callback, rtm, RTREQ_PRIO_HIGH); 654 } 655 break; 656 case RTM_DELETE: 657 /* 658 * Backing rtm (bak_rtm) could _not_ be freed during 659 * rtrequest1_global or rtsearch_global, even if the 660 * callback reallocates the rtm due to its size changes, 661 * since rtinfo points to the backing rtm's memory area. 662 * After rtrequest1_global or rtsearch_global returns, 663 * it is safe to free the backing rtm, since rtinfo will 664 * not be used anymore. 665 * 666 * new_rtm will be used to save the new rtm allocated 667 * by rtrequest1_global or rtsearch_global. 
668 */ 669 arg.bak_rtm = rtm; 670 arg.new_rtm = rtm; 671 error = rtrequest1_global(RTM_DELETE, &rtinfo, 672 route_output_delete_callback, &arg, RTREQ_PRIO_HIGH); 673 rtm = arg.new_rtm; 674 if (rtm != arg.bak_rtm) 675 kfree(arg.bak_rtm, M_RTABLE); 676 break; 677 case RTM_GET: 678 /* See the comment in RTM_DELETE */ 679 arg.bak_rtm = rtm; 680 arg.new_rtm = rtm; 681 error = rtsearch_global(RTM_GET, &rtinfo, 682 route_output_get_callback, &arg, RTS_NOEXACTMATCH, 683 RTREQ_PRIO_NORM); 684 rtm = arg.new_rtm; 685 if (rtm != arg.bak_rtm) 686 kfree(arg.bak_rtm, M_RTABLE); 687 break; 688 case RTM_CHANGE: 689 error = rtsearch_global(RTM_CHANGE, &rtinfo, 690 route_output_change_callback, rtm, RTS_EXACTMATCH, 691 RTREQ_PRIO_HIGH); 692 break; 693 case RTM_LOCK: 694 error = rtsearch_global(RTM_LOCK, &rtinfo, 695 route_output_lock_callback, rtm, RTS_EXACTMATCH, 696 RTREQ_PRIO_HIGH); 697 break; 698 default: 699 error = EOPNOTSUPP; 700 break; 701 } 702 flush: 703 if (rtm != NULL) { 704 if (error != 0) 705 rtm->rtm_errno = error; 706 else 707 rtm->rtm_flags |= RTF_DONE; 708 } 709 710 /* 711 * Check to see if we don't want our own messages. 
712 */ 713 if (!(so->so_options & SO_USELOOPBACK)) { 714 if (route_cb.any_count <= 1) { 715 if (rtm != NULL) 716 kfree(rtm, M_RTABLE); 717 m_freem(m); 718 return (error); 719 } 720 /* There is another listener, so construct message */ 721 rp = sotorawcb(so); 722 } 723 if (rtm != NULL) { 724 m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm); 725 if (m->m_pkthdr.len < rtm->rtm_msglen) { 726 m_freem(m); 727 m = NULL; 728 } else if (m->m_pkthdr.len > rtm->rtm_msglen) 729 m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len); 730 kfree(rtm, M_RTABLE); 731 } 732 if (m != NULL) 733 rts_input_skip(m, family, rp); 734 return (error); 735 } 736 737 static void 738 route_output_add_callback(int cmd, int error, struct rt_addrinfo *rtinfo, 739 struct rtentry *rt, void *arg) 740 { 741 struct rt_msghdr *rtm = arg; 742 743 if (error == 0 && rt != NULL) { 744 rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, 745 &rt->rt_rmx); 746 rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); 747 rt->rt_rmx.rmx_locks |= 748 (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks); 749 if (rtinfo->rti_genmask != NULL) { 750 rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask); 751 if (rt->rt_genmask == NULL) { 752 /* 753 * This should not happen, since we 754 * have already installed genmask 755 * on each CPU before we reach here. 
756 */ 757 panic("genmask is gone!?"); 758 } 759 } else { 760 rt->rt_genmask = NULL; 761 } 762 rtm->rtm_index = rt->rt_ifp->if_index; 763 } 764 } 765 766 static void 767 route_output_delete_callback(int cmd, int error, struct rt_addrinfo *rtinfo, 768 struct rtentry *rt, void *arg) 769 { 770 if (error == 0 && rt) { 771 ++rt->rt_refcnt; 772 if (fillrtmsg(arg, rt, rtinfo) != 0) { 773 error = ENOBUFS; 774 /* XXX no way to return the error */ 775 } 776 --rt->rt_refcnt; 777 } 778 if (rt && rt->rt_refcnt == 0) { 779 ++rt->rt_refcnt; 780 rtfree(rt); 781 } 782 } 783 784 static int 785 route_output_get_callback(int cmd, struct rt_addrinfo *rtinfo, 786 struct rtentry *rt, void *arg, int found_cnt) 787 { 788 int error, found = 0; 789 790 if (((rtinfo->rti_flags ^ rt->rt_flags) & RTF_HOST) == 0) 791 found = 1; 792 793 error = fillrtmsg(arg, rt, rtinfo); 794 if (!error && found) { 795 /* Got the exact match, we could return now! */ 796 error = EJUSTRETURN; 797 } 798 return error; 799 } 800 801 static int 802 route_output_change_callback(int cmd, struct rt_addrinfo *rtinfo, 803 struct rtentry *rt, void *arg, int found_cnt) 804 { 805 struct rt_msghdr *rtm = arg; 806 struct ifaddr *ifa; 807 int error = 0; 808 809 /* 810 * new gateway could require new ifaddr, ifp; 811 * flags may also be different; ifp may be specified 812 * by ll sockaddr when protocol address is ambiguous 813 */ 814 if (((rt->rt_flags & RTF_GATEWAY) && rtinfo->rti_gateway != NULL) || 815 rtinfo->rti_ifpaddr != NULL || 816 (rtinfo->rti_ifaaddr != NULL && 817 !sa_equal(rtinfo->rti_ifaaddr, rt->rt_ifa->ifa_addr))) { 818 error = rt_getifa(rtinfo); 819 if (error != 0) 820 goto done; 821 } 822 if (rtinfo->rti_gateway != NULL) { 823 /* 824 * We only need to generate rtmsg upon the 825 * first route to be changed. 
826 */ 827 error = rt_setgate(rt, rt_key(rt), rtinfo->rti_gateway); 828 if (error != 0) 829 goto done; 830 } 831 if ((ifa = rtinfo->rti_ifa) != NULL) { 832 struct ifaddr *oifa = rt->rt_ifa; 833 834 if (oifa != ifa) { 835 if (oifa && oifa->ifa_rtrequest) 836 oifa->ifa_rtrequest(RTM_DELETE, rt); 837 IFAFREE(rt->rt_ifa); 838 IFAREF(ifa); 839 rt->rt_ifa = ifa; 840 rt->rt_ifp = rtinfo->rti_ifp; 841 } 842 } 843 rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, &rt->rt_rmx); 844 if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest) 845 rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt); 846 if (rtinfo->rti_genmask != NULL) { 847 rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask); 848 if (rt->rt_genmask == NULL) { 849 /* 850 * This should not happen, since we 851 * have already installed genmask 852 * on each CPU before we reach here. 853 */ 854 panic("genmask is gone!?"); 855 } 856 } 857 rtm->rtm_index = rt->rt_ifp->if_index; 858 if (found_cnt == 1) 859 rt_rtmsg(RTM_CHANGE, rt, rt->rt_ifp, 0); 860 done: 861 return error; 862 } 863 864 static int 865 route_output_lock_callback(int cmd, struct rt_addrinfo *rtinfo, 866 struct rtentry *rt, void *arg, 867 int found_cnt __unused) 868 { 869 struct rt_msghdr *rtm = arg; 870 871 rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); 872 rt->rt_rmx.rmx_locks |= 873 (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks); 874 return 0; 875 } 876 877 static void 878 rt_setmetrics(u_long which, struct rt_metrics *in, struct rt_metrics *out) 879 { 880 #define setmetric(flag, elt) if (which & (flag)) out->elt = in->elt; 881 setmetric(RTV_RPIPE, rmx_recvpipe); 882 setmetric(RTV_SPIPE, rmx_sendpipe); 883 setmetric(RTV_SSTHRESH, rmx_ssthresh); 884 setmetric(RTV_RTT, rmx_rtt); 885 setmetric(RTV_RTTVAR, rmx_rttvar); 886 setmetric(RTV_HOPCOUNT, rmx_hopcount); 887 setmetric(RTV_MTU, rmx_mtu); 888 setmetric(RTV_EXPIRE, rmx_expire); 889 setmetric(RTV_MSL, rmx_msl); 890 setmetric(RTV_IWMAXSEGS, rmx_iwmaxsegs); 891 setmetric(RTV_IWCAPSEGS, rmx_iwcapsegs); 892 #undef setmetric 893 } 894 895 /* 
896 * Extract the addresses of the passed sockaddrs. 897 * Do a little sanity checking so as to avoid bad memory references. 898 * This data is derived straight from userland. 899 */ 900 static int 901 rt_xaddrs(char *cp, char *cplim, struct rt_addrinfo *rtinfo) 902 { 903 struct sockaddr *sa; 904 int i; 905 906 for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) { 907 if ((rtinfo->rti_addrs & (1 << i)) == 0) 908 continue; 909 sa = (struct sockaddr *)cp; 910 /* 911 * It won't fit. 912 */ 913 if ((cp + sa->sa_len) > cplim) { 914 return (EINVAL); 915 } 916 917 /* 918 * There are no more... Quit now. 919 * If there are more bits, they are in error. 920 * I've seen this. route(1) can evidently generate these. 921 * This causes kernel to core dump. 922 * For compatibility, if we see this, point to a safe address. 923 */ 924 if (sa->sa_len == 0) { 925 static struct sockaddr sa_zero = { 926 sizeof sa_zero, AF_INET, 927 }; 928 929 rtinfo->rti_info[i] = &sa_zero; 930 kprintf("rtsock: received more addr bits than sockaddrs.\n"); 931 return (0); /* should be EINVAL but for compat */ 932 } 933 934 /* Accept the sockaddr. 
*/ 935 rtinfo->rti_info[i] = sa; 936 cp += RT_ROUNDUP(sa->sa_len); 937 } 938 return (0); 939 } 940 941 static int 942 rt_msghdrsize(int type) 943 { 944 switch (type) { 945 case RTM_DELADDR: 946 case RTM_NEWADDR: 947 return sizeof(struct ifa_msghdr); 948 case RTM_DELMADDR: 949 case RTM_NEWMADDR: 950 return sizeof(struct ifma_msghdr); 951 case RTM_IFINFO: 952 return sizeof(struct if_msghdr); 953 case RTM_IFANNOUNCE: 954 case RTM_IEEE80211: 955 return sizeof(struct if_announcemsghdr); 956 default: 957 return sizeof(struct rt_msghdr); 958 } 959 } 960 961 static int 962 rt_msgsize(int type, const struct rt_addrinfo *rtinfo) 963 { 964 int len, i; 965 966 len = rt_msghdrsize(type); 967 for (i = 0; i < RTAX_MAX; i++) { 968 if (rtinfo->rti_info[i] != NULL) 969 len += RT_ROUNDUP(rtinfo->rti_info[i]->sa_len); 970 } 971 len = ALIGN(len); 972 return len; 973 } 974 975 /* 976 * Build a routing message in a buffer. 977 * Copy the addresses in the rtinfo->rti_info[] sockaddr array 978 * to the end of the buffer after the message header. 979 * 980 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[]. 981 * This side-effect can be avoided if we reorder the addrs bitmask field in all 982 * the route messages to line up so we can set it here instead of back in the 983 * calling routine. 
984 */ 985 static void 986 rt_msg_buffer(int type, struct rt_addrinfo *rtinfo, void *buf, int msglen) 987 { 988 struct rt_msghdr *rtm; 989 char *cp; 990 int dlen, i; 991 992 rtm = (struct rt_msghdr *) buf; 993 rtm->rtm_version = RTM_VERSION; 994 rtm->rtm_type = type; 995 rtm->rtm_msglen = msglen; 996 997 cp = (char *)buf + rt_msghdrsize(type); 998 rtinfo->rti_addrs = 0; 999 for (i = 0; i < RTAX_MAX; i++) { 1000 struct sockaddr *sa; 1001 1002 if ((sa = rtinfo->rti_info[i]) == NULL) 1003 continue; 1004 rtinfo->rti_addrs |= (1 << i); 1005 dlen = RT_ROUNDUP(sa->sa_len); 1006 bcopy(sa, cp, dlen); 1007 cp += dlen; 1008 } 1009 } 1010 1011 /* 1012 * Build a routing message in a mbuf chain. 1013 * Copy the addresses in the rtinfo->rti_info[] sockaddr array 1014 * to the end of the mbuf after the message header. 1015 * 1016 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[]. 1017 * This side-effect can be avoided if we reorder the addrs bitmask field in all 1018 * the route messages to line up so we can set it here instead of back in the 1019 * calling routine. 
1020 */ 1021 static struct mbuf * 1022 rt_msg_mbuf(int type, struct rt_addrinfo *rtinfo) 1023 { 1024 struct mbuf *m; 1025 struct rt_msghdr *rtm; 1026 int hlen, len; 1027 int i; 1028 1029 hlen = rt_msghdrsize(type); 1030 KASSERT(hlen <= MCLBYTES, ("rt_msg_mbuf: hlen %d doesn't fit", hlen)); 1031 1032 m = m_getl(hlen, M_NOWAIT, MT_DATA, M_PKTHDR, NULL); 1033 if (m == NULL) 1034 return (NULL); 1035 mbuftrackid(m, 32); 1036 m->m_pkthdr.len = m->m_len = hlen; 1037 m->m_pkthdr.rcvif = NULL; 1038 rtinfo->rti_addrs = 0; 1039 len = hlen; 1040 for (i = 0; i < RTAX_MAX; i++) { 1041 struct sockaddr *sa; 1042 int dlen; 1043 1044 if ((sa = rtinfo->rti_info[i]) == NULL) 1045 continue; 1046 rtinfo->rti_addrs |= (1 << i); 1047 dlen = RT_ROUNDUP(sa->sa_len); 1048 m_copyback(m, len, dlen, (caddr_t)sa); /* can grow mbuf chain */ 1049 len += dlen; 1050 } 1051 if (m->m_pkthdr.len != len) { /* one of the m_copyback() calls failed */ 1052 m_freem(m); 1053 return (NULL); 1054 } 1055 rtm = mtod(m, struct rt_msghdr *); 1056 bzero(rtm, hlen); 1057 rtm->rtm_msglen = len; 1058 rtm->rtm_version = RTM_VERSION; 1059 rtm->rtm_type = type; 1060 return (m); 1061 } 1062 1063 /* 1064 * This routine is called to generate a message from the routing 1065 * socket indicating that a redirect has occurred, a routing lookup 1066 * has failed, or that a protocol has detected timeouts to a particular 1067 * destination. 
1068 */ 1069 void 1070 rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error) 1071 { 1072 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST]; 1073 struct rt_msghdr *rtm; 1074 struct mbuf *m; 1075 1076 if (route_cb.any_count == 0) 1077 return; 1078 m = rt_msg_mbuf(type, rtinfo); 1079 if (m == NULL) 1080 return; 1081 rtm = mtod(m, struct rt_msghdr *); 1082 rtm->rtm_flags = RTF_DONE | flags; 1083 rtm->rtm_errno = error; 1084 rtm->rtm_addrs = rtinfo->rti_addrs; 1085 rts_input(m, familyof(dst)); 1086 } 1087 1088 void 1089 rt_dstmsg(int type, struct sockaddr *dst, int error) 1090 { 1091 struct rt_msghdr *rtm; 1092 struct rt_addrinfo addrs; 1093 struct mbuf *m; 1094 1095 if (route_cb.any_count == 0) 1096 return; 1097 bzero(&addrs, sizeof(struct rt_addrinfo)); 1098 addrs.rti_info[RTAX_DST] = dst; 1099 m = rt_msg_mbuf(type, &addrs); 1100 if (m == NULL) 1101 return; 1102 rtm = mtod(m, struct rt_msghdr *); 1103 rtm->rtm_flags = RTF_DONE; 1104 rtm->rtm_errno = error; 1105 rtm->rtm_addrs = addrs.rti_addrs; 1106 rts_input(m, familyof(dst)); 1107 } 1108 1109 /* 1110 * This routine is called to generate a message from the routing 1111 * socket indicating that the status of a network interface has changed. 
 */
void
rt_ifmsg(struct ifnet *ifp)
{
	struct if_msghdr *ifm;
	struct mbuf *m;
	struct rt_addrinfo rtinfo;

	/* Nothing to do if nobody is listening on a routing socket. */
	if (route_cb.any_count == 0)
		return;
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	m = rt_msg_mbuf(RTM_IFINFO, &rtinfo);
	if (m == NULL)
		return;
	ifm = mtod(m, struct if_msghdr *);
	ifm->ifm_index = ifp->if_index;
	ifm->ifm_flags = ifp->if_flags;
	ifm->ifm_data = ifp->if_data;
	ifm->ifm_addrs = 0;	/* no sockaddrs follow the header */
	rts_input(m, 0);
}

/*
 * Build and dispatch an RTM_NEWADDR/RTM_DELADDR message describing
 * @ifa.  The first ifaddr on the interface's per-cpu address list is
 * supplied as the message's rti_ifpaddr.  Silently does nothing if
 * the message mbuf cannot be allocated.
 */
static void
rt_ifamsg(int cmd, struct ifaddr *ifa)
{
	struct ifa_msghdr *ifam;
	struct rt_addrinfo rtinfo;
	struct mbuf *m;
	struct ifnet *ifp = ifa->ifa_ifp;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_ifaaddr = ifa->ifa_addr;
	rtinfo.rti_ifpaddr =
	    TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
	rtinfo.rti_netmask = ifa->ifa_netmask;
	rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;

	m = rt_msg_mbuf(cmd, &rtinfo);
	if (m == NULL)
		return;

	ifam = mtod(m, struct ifa_msghdr *);
	ifam->ifam_index = ifp->if_index;
	ifam->ifam_flags = ifa->ifa_flags;
	ifam->ifam_addrs = rtinfo.rti_addrs;
	ifam->ifam_addrflags = if_addrflags(ifa);
	ifam->ifam_metric = ifa->ifa_metric;

	rts_input(m, familyof(ifa->ifa_addr));
}

/*
 * Announce a route change (@cmd, e.g. RTM_ADD/RTM_DELETE) for @rt on
 * the routing socket; @error is reported to listeners in rtm_errno.
 * A NULL @rt is silently ignored.
 */
void
rt_rtmsg(int cmd, struct rtentry *rt, struct ifnet *ifp, int error)
{
	struct rt_msghdr *rtm;
	struct rt_addrinfo rtinfo;
	struct mbuf *m;
	struct sockaddr *dst;

	if (rt == NULL)
		return;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_dst = dst = rt_key(rt);
	rtinfo.rti_gateway = rt->rt_gateway;
	rtinfo.rti_netmask = rt_mask(rt);
	if (ifp != NULL) {
		rtinfo.rti_ifpaddr =
		    TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
	}
	if (rt->rt_ifa != NULL)
		rtinfo.rti_ifaaddr = rt->rt_ifa->ifa_addr;

	m = rt_msg_mbuf(cmd, &rtinfo);
	if (m == NULL)
		return;

	rtm = mtod(m, struct rt_msghdr *);
	if (ifp != NULL)
		rtm->rtm_index = ifp->if_index;
	/* OR in, rather than assign: rt_msg_mbuf() may have preset flags */
	rtm->rtm_flags |= rt->rt_flags;
	rtm->rtm_errno = error;
	rtm->rtm_addrs = rtinfo.rti_addrs;

	rts_input(m, familyof(dst));
}

/*
 * This is called to generate messages from the routing socket
 * indicating a network interface has had addresses associated with it.
 * if we ever reverse the logic and replace messages TO the routing
 * socket indicate a request to configure interfaces, then it will
 * be unnecessary as the routing socket will automatically generate
 * copies of it.
 *
 * For RTM_ADD the address message precedes the route message; for
 * RTM_DELETE the order is reversed.
 */
void
rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
	if (route_cb.any_count == 0)
		return;

	if (cmd == RTM_ADD) {
		rt_ifamsg(RTM_NEWADDR, ifa);
		rt_rtmsg(RTM_ADD, rt, ifa->ifa_ifp, error);
	} else {
		KASSERT((cmd == RTM_DELETE), ("unknown cmd %d", cmd));
		rt_rtmsg(RTM_DELETE, rt, ifa->ifa_ifp, error);
		rt_ifamsg(RTM_DELADDR, ifa);
	}
}

/*
 * This is the analogue to rt_newaddrmsg which performs the same
 * function but for multicast group memberships.  This is easier since
 * there is no route state to worry about.
1227 */ 1228 void 1229 rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma) 1230 { 1231 struct rt_addrinfo rtinfo; 1232 struct mbuf *m = NULL; 1233 struct ifnet *ifp = ifma->ifma_ifp; 1234 struct ifma_msghdr *ifmam; 1235 1236 if (route_cb.any_count == 0) 1237 return; 1238 1239 bzero(&rtinfo, sizeof(struct rt_addrinfo)); 1240 rtinfo.rti_ifaaddr = ifma->ifma_addr; 1241 if (ifp != NULL && !TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 1242 rtinfo.rti_ifpaddr = 1243 TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr; 1244 } 1245 /* 1246 * If a link-layer address is present, present it as a ``gateway'' 1247 * (similarly to how ARP entries, e.g., are presented). 1248 */ 1249 rtinfo.rti_gateway = ifma->ifma_lladdr; 1250 1251 m = rt_msg_mbuf(cmd, &rtinfo); 1252 if (m == NULL) 1253 return; 1254 1255 ifmam = mtod(m, struct ifma_msghdr *); 1256 ifmam->ifmam_index = ifp->if_index; 1257 ifmam->ifmam_addrs = rtinfo.rti_addrs; 1258 1259 rts_input(m, familyof(ifma->ifma_addr)); 1260 } 1261 1262 static struct mbuf * 1263 rt_makeifannouncemsg(struct ifnet *ifp, int type, int what, 1264 struct rt_addrinfo *info) 1265 { 1266 struct if_announcemsghdr *ifan; 1267 struct mbuf *m; 1268 1269 if (route_cb.any_count == 0) 1270 return NULL; 1271 1272 bzero(info, sizeof(*info)); 1273 m = rt_msg_mbuf(type, info); 1274 if (m == NULL) 1275 return NULL; 1276 1277 ifan = mtod(m, struct if_announcemsghdr *); 1278 ifan->ifan_index = ifp->if_index; 1279 strlcpy(ifan->ifan_name, ifp->if_xname, sizeof ifan->ifan_name); 1280 ifan->ifan_what = what; 1281 return m; 1282 } 1283 1284 /* 1285 * This is called to generate routing socket messages indicating 1286 * IEEE80211 wireless events. 1287 * XXX we piggyback on the RTM_IFANNOUNCE msg format in a clumsy way. 
 */
void
rt_ieee80211msg(struct ifnet *ifp, int what, void *data, size_t data_len)
{
	struct rt_addrinfo info;
	struct mbuf *m;

	m = rt_makeifannouncemsg(ifp, RTM_IEEE80211, what, &info);
	if (m == NULL)
		return;

	/*
	 * Append the ieee80211 data.  Try to stick it in the
	 * mbuf containing the ifannounce msg; otherwise allocate
	 * a new mbuf and append.
	 *
	 * NB: we assume m is a single mbuf.
	 */
	if (data_len > M_TRAILINGSPACE(m)) {
		/* XXX use m_getb(data_len, M_NOWAIT, MT_DATA, 0); */
		struct mbuf *n = m_get(M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return;
		}
		/* Asserts the payload fits in one plain mbuf. */
		KKASSERT(data_len <= M_TRAILINGSPACE(n));
		bcopy(data, mtod(n, void *), data_len);
		n->m_len = data_len;
		m->m_next = n;
	} else if (data_len > 0) {
		/* Enough trailing room: append in place. */
		bcopy(data, mtod(m, u_int8_t *) + m->m_len, data_len);
		m->m_len += data_len;
	}
	mbuftrackid(m, 33);
	/* Keep the pkthdr and announce-msg lengths in sync with the data. */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += data_len;
	mtod(m, struct if_announcemsghdr *)->ifan_msglen += data_len;
	rts_input(m, 0);
}

/*
 * This is called to generate routing socket messages indicating
 * network interface arrival and departure.
 */
void
rt_ifannouncemsg(struct ifnet *ifp, int what)
{
	struct rt_addrinfo addrinfo;
	struct mbuf *m;

	m = rt_makeifannouncemsg(ifp, RTM_IFANNOUNCE, what, &addrinfo);
	if (m != NULL)
		rts_input(m, 0);
}

/*
 * Grow (or allocate) the temporary message buffer in @w to @len bytes.
 * The old buffer's contents are NOT preserved.  Returns 0 or ENOMEM.
 */
static int
resizewalkarg(struct walkarg *w, int len)
{
	void *newptr;

	newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
	if (newptr == NULL)
		return (ENOMEM);
	if (w->w_tmem != NULL)
		kfree(w->w_tmem, M_RTABLE);
	w->w_tmem = newptr;
	w->w_tmemsize = len;
	return (0);
}

/*
 * Fold the per-cpu interface statistics into ifp->if_data so they can
 * be exported as a single coherent snapshot.
 */
static void
ifnet_compute_stats(struct ifnet *ifp)
{
	IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
	IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
	IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
	IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
	IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
	IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
	IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
	IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
	IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
	IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
	IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops);
}

/*
 * Return the address-family specific flags for @ifa (currently only
 * IPv6 has any); 0 for all other families.
 */
static int
if_addrflags(const struct ifaddr *ifa)
{
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET6
	case AF_INET6:
		return ((const struct in6_ifaddr *)ifa)->ia6_flags;
#endif
	default:
		return 0;
	}
}

/*
 * NET_RT_IFLIST handler: emit one RTM_IFINFO message per interface,
 * followed by an RTM_NEWADDR message per address, via SYSCTL_OUT.
 * @af != 0 restricts the address messages to that family; w->w_arg
 * != 0 restricts output to the interface with that index.
 */
static int
sysctl_iflist(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct rt_addrinfo rtinfo;
	int msglen, error;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		struct ifaddr_container *ifac, *ifac_mark;
		struct ifaddr_marker mark;
		struct ifaddrhead *head;
		struct ifaddr *ifa;

		if (w->w_arg && w->w_arg != ifp->if_index)
			continue;
		head = &ifp->if_addrheads[mycpuid];
		/*
		 * There is no need to reference the first ifaddr
		 * even if the following resizewalkarg() blocks,
		 * since the first ifaddr will not be destroyed
		 * when the ifnet lock is held.
		 */
		ifac = TAILQ_FIRST(head);
		ifa = ifac->ifa;
		rtinfo.rti_ifpaddr = ifa->ifa_addr;
		msglen = rt_msgsize(RTM_IFINFO, &rtinfo);
		if (w->w_tmemsize < msglen && resizewalkarg(w, msglen) != 0) {
			ifnet_unlock();
			return (ENOMEM);
		}
		rt_msg_buffer(RTM_IFINFO, &rtinfo, w->w_tmem, msglen);
		rtinfo.rti_ifpaddr = NULL;
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct if_msghdr *ifm = w->w_tmem;

			ifm->ifm_index = ifp->if_index;
			ifm->ifm_flags = ifp->if_flags;
			ifnet_compute_stats(ifp);
			ifm->ifm_data = ifp->if_data;
			ifm->ifm_addrs = rtinfo.rti_addrs;
			error = SYSCTL_OUT(w->w_req, ifm, msglen);
			if (error) {
				ifnet_unlock();
				return (error);
			}
		}
		/*
		 * Add a marker, since SYSCTL_OUT() could block and during
		 * that period the list could be changed.  The marker is
		 * re-inserted after each visited element so the walk can
		 * resume safely.
		 */
		ifa_marker_init(&mark, ifp);
		ifac_mark = &mark.ifac;
		TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);
		while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) {
			TAILQ_REMOVE(head, ifac_mark, ifa_link);
			TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);

			ifa = ifac->ifa;

			/* Ignore marker */
			if (ifa->ifa_addr->sa_family == AF_UNSPEC)
				continue;

			if (af && af != ifa->ifa_addr->sa_family)
				continue;
			/* Jailed processes only see their own addresses. */
			if (curproc->p_ucred->cr_prison &&
			    prison_if(curproc->p_ucred, ifa->ifa_addr))
				continue;
			rtinfo.rti_ifaaddr = ifa->ifa_addr;
			rtinfo.rti_netmask = ifa->ifa_netmask;
			rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;
			msglen = rt_msgsize(RTM_NEWADDR, &rtinfo);
			/*
			 * Keep a reference on this ifaddr, so that it will
			 * not be destroyed if the following resizewalkarg()
			 * blocks.
			 */
			IFAREF(ifa);
			if (w->w_tmemsize < msglen &&
			    resizewalkarg(w, msglen) != 0) {
				IFAFREE(ifa);
				TAILQ_REMOVE(head, ifac_mark, ifa_link);
				ifnet_unlock();
				return (ENOMEM);
			}
			rt_msg_buffer(RTM_NEWADDR, &rtinfo, w->w_tmem, msglen);
			if (w->w_req != NULL) {
				struct ifa_msghdr *ifam = w->w_tmem;

				ifam->ifam_index = ifa->ifa_ifp->if_index;
				ifam->ifam_flags = ifa->ifa_flags;
				ifam->ifam_addrs = rtinfo.rti_addrs;
				ifam->ifam_addrflags = if_addrflags(ifa);
				ifam->ifam_metric = ifa->ifa_metric;
				error = SYSCTL_OUT(w->w_req, w->w_tmem, msglen);
				if (error) {
					IFAFREE(ifa);
					TAILQ_REMOVE(head, ifac_mark, ifa_link);
					ifnet_unlock();
					return (error);
				}
			}
			IFAFREE(ifa);
		}
		TAILQ_REMOVE(head, ifac_mark, ifa_link);
		rtinfo.rti_netmask = NULL;
		rtinfo.rti_ifaaddr = NULL;
		rtinfo.rti_bcastaddr = NULL;
	}
	ifnet_unlock();
	return (0);
}

/*
 * Initialize @w for a route table dump and size its output buffer for
 * RTTABLE_DUMP_MSGCNT_MAX worst-case RTM_GET messages (all RTAX slots
 * assumed present at maximum sockaddr_storage size).  Returns 0 or
 * ENOMEM.
 */
static int
rttable_walkarg_create(struct rttable_walkarg *w, int op, int arg)
{
	struct rt_addrinfo rtinfo;
	struct sockaddr_storage ss;
	int i, msglen;

	memset(w, 0, sizeof(*w));
	w->w_op = op;
	w->w_arg = arg;

	memset(&ss, 0, sizeof(ss));
	ss.ss_len = sizeof(ss);

	memset(&rtinfo, 0, sizeof(rtinfo));
	for (i = 0; i < RTAX_MAX; ++i)
		rtinfo.rti_info[i] = (struct sockaddr *)&ss;
	msglen = rt_msgsize(RTM_GET, &rtinfo);

	w->w_bufsz = msglen * RTTABLE_DUMP_MSGCNT_MAX;
	w->w_buf = kmalloc(w->w_bufsz, M_TEMP, M_WAITOK | M_NULLOK);
	if (w->w_buf == NULL)
		return ENOMEM;
	return 0;
}

/* Release the dump buffer allocated by rttable_walkarg_create(). */
static void
rttable_walkarg_destroy(struct rttable_walkarg *w)
{
	kfree(w->w_buf, M_TEMP);
}

/*
 * Fill @rtinfo with the sockaddrs describing the route entry @rn
 * (dst, gateway, netmask, genmask, and interface addresses when an
 * interface is attached).
 */
static void
rttable_entry_rtinfo(struct rt_addrinfo *rtinfo, struct radix_node *rn)
{
	struct rtentry *rt = (struct rtentry *)rn;

	bzero(rtinfo, sizeof(*rtinfo));
	rtinfo->rti_dst = rt_key(rt);
	rtinfo->rti_gateway = rt->rt_gateway;
	rtinfo->rti_netmask = rt_mask(rt);
	rtinfo->rti_genmask = rt->rt_genmask;
	if (rt->rt_ifp != NULL) {
		rtinfo->rti_ifpaddr =
		TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
		rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr;
		if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
			rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr;
	}
}

/*
 * Radix-tree walk callback: serialize one route entry as an RTM_GET
 * message into w->w_buf.  When the buffer cannot hold the entry (and,
 * for duped keys, all of its dupedkey chain, since rn_walktree_at
 * restarts from the first dupedkey) the resume key/mask are saved in
 * @w and EJUSTRETURN is returned so the caller can flush and resume.
 */
static int
rttable_walk_entry(struct radix_node *rn, void *xw)
{
	struct rttable_walkarg *w = xw;
	struct rtentry *rt = (struct rtentry *)rn;
	struct rt_addrinfo rtinfo;
	struct rt_msghdr *rtm;
	boolean_t save = FALSE;
	int msglen, w_bufleft;
	void *ptr;

	rttable_entry_rtinfo(&rtinfo, rn);
	msglen = rt_msgsize(RTM_GET, &rtinfo);

	w_bufleft = w->w_bufsz - w->w_buflen;

	if (rn->rn_dupedkey != NULL) {
		struct radix_node *rn1 = rn;
		int total_msglen = msglen;

		/*
		 * Make sure that we have enough space left for all
		 * dupedkeys, since rn_walktree_at always starts
		 * from the first dupedkey.
		 */
		while ((rn1 = rn1->rn_dupedkey) != NULL) {
			struct rt_addrinfo rtinfo1;
			int msglen1;

			if (rn1->rn_flags & RNF_ROOT)
				continue;

			rttable_entry_rtinfo(&rtinfo1, rn1);
			msglen1 = rt_msgsize(RTM_GET, &rtinfo1);
			total_msglen += msglen1;
		}

		if (total_msglen > w_bufleft) {
			if (total_msglen > w->w_bufsz) {
				static int logged = 0;

				if (!logged) {
					kprintf("buffer is too small for "
					    "all dupedkeys, increase "
					    "RTTABLE_DUMP_MSGCNT_MAX\n");
					logged = 1;
				}
				return ENOMEM;
			}
			save = TRUE;
		}
	} else if (msglen > w_bufleft) {
		save = TRUE;
	}

	if (save) {
		/*
		 * Not enough buffer left; remember the position
		 * to start from upon next round.
		 */
		KASSERT(msglen <= w->w_bufsz, ("msg too long %d", msglen));

		KASSERT(rtinfo.rti_dst->sa_len <= sizeof(w->w_key0),
		    ("key too long %d", rtinfo.rti_dst->sa_len));
		memset(&w->w_key0, 0, sizeof(w->w_key0));
		memcpy(&w->w_key0, rtinfo.rti_dst, rtinfo.rti_dst->sa_len);
		w->w_key = (const char *)&w->w_key0;

		if (rtinfo.rti_netmask != NULL) {
			KASSERT(
			    rtinfo.rti_netmask->sa_len <= sizeof(w->w_mask0),
			    ("mask too long %d", rtinfo.rti_netmask->sa_len));
			memset(&w->w_mask0, 0, sizeof(w->w_mask0));
			memcpy(&w->w_mask0, rtinfo.rti_netmask,
			    rtinfo.rti_netmask->sa_len);
			w->w_mask = (const char *)&w->w_mask0;
		} else {
			w->w_mask = NULL;
		}
		return EJUSTRETURN;
	}

	/* NET_RT_FLAGS: only dump entries matching the requested flags. */
	if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
		return 0;

	ptr = ((uint8_t *)w->w_buf) + w->w_buflen;
	rt_msg_buffer(RTM_GET, &rtinfo, ptr, msglen);

	rtm = (struct rt_msghdr *)ptr;
	rtm->rtm_flags = rt->rt_flags;
	rtm->rtm_use = rt->rt_use;
	rtm->rtm_rmx = rt->rt_rmx;
	/*
	 * NOTE(review): rt->rt_ifp is dereferenced unconditionally here,
	 * while rttable_entry_rtinfo() guards against it being NULL —
	 * presumably every dumped route has an ifp by this point; verify.
	 */
	rtm->rtm_index = rt->rt_ifp->if_index;
	rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0;
	rtm->rtm_addrs = rtinfo.rti_addrs;

	w->w_buflen += msglen;

	return 0;
}

/*
 * Netisr dispatch wrapper: run the (resumable) radix-tree walk for
 * the requested address family on the current cpu's route table.
 */
static void
rttable_walk_dispatch(netmsg_t msg)
{
	struct netmsg_rttable_walk *nmsg = (struct netmsg_rttable_walk *)msg;
	struct radix_node_head *rnh = rt_tables[mycpuid][nmsg->af];
	struct rttable_walkarg *w = nmsg->w;
	int error;

	error = rnh->rnh_walktree_at(rnh, w->w_key, w->w_mask,
	    rttable_walk_entry, w);
	lwkt_replymsg(&nmsg->base.lmsg, error);
}

/*
 * NET_RT_DUMP/NET_RT_FLAGS handler: dump the route tables for family
 * @af (0 = all families), flushing the walk buffer to SYSCTL_OUT and
 * resuming (EJUSTRETURN) until each table is exhausted.
 */
static int
sysctl_rttable(int af, struct sysctl_req *req, int op, int arg)
{
	struct rttable_walkarg w;
	int error, i;

	error = rttable_walkarg_create(&w, op, arg);
	if (error)
		return error;

	error = EINVAL;
	for (i = 1; i <= AF_MAX; i++) {
		if (rt_tables[mycpuid][i] != NULL && (af == 0 || af == i)) {
			w.w_key = NULL;
			w.w_mask = NULL;
			for (;;) {
				struct netmsg_rttable_walk nmsg;

				netmsg_init(&nmsg.base, NULL,
				    &curthread->td_msgport, 0,
				    rttable_walk_dispatch);
				nmsg.af = i;
				nmsg.w = &w;

				w.w_buflen = 0;

				error = lwkt_domsg(netisr_cpuport(mycpuid),
				    &nmsg.base.lmsg, 0);
				if (error && error != EJUSTRETURN)
					goto done;

				if (req != NULL && w.w_buflen > 0) {
					int error1;

					error1 = SYSCTL_OUT(req, w.w_buf,
					    w.w_buflen);
					if (error1) {
						error = error1;
						goto done;
					}
				}
				if (error == 0) /* done */
					break;
			}
		}
	}
done:
	rttable_walkarg_destroy(&w);
	return error;
}

/*
 * net.route sysctl entry point.  The MIB name is
 * { af, op, arg [, cpu] }; the optional cpu argument selects which
 * cpu's route table replica to dump (default cpu0 for a stable view).
 * The handler migrates to the target cpu for the duration of the dump.
 */
static int
sysctl_rtsock(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int error = EINVAL;
	int origcpu, cpu;
	u_char af;
	struct walkarg w;

	name ++;
	namelen--;
	if (req->newptr)
		return (EPERM);
	if (namelen != 3 && namelen != 4)
		return (EINVAL);
	af = name[0];
	bzero(&w, sizeof w);
	w.w_op = name[1];
	w.w_arg = name[2];
	w.w_req = req;

	/*
	 * Optional third argument specifies cpu, used primarily for
	 * debugging the route table.
	 */
	if (namelen == 4) {
		if (name[3] < 0 || name[3] >= netisr_ncpus)
			return (EINVAL);
		cpu = name[3];
	} else {
		/*
		 * Target cpu is not specified, use cpu0 then, so that
		 * the result set will be relatively stable.
		 */
		cpu = 0;
	}
	origcpu = mycpuid;
	lwkt_migratecpu(cpu);

	switch (w.w_op) {
	case NET_RT_DUMP:
	case NET_RT_FLAGS:
		error = sysctl_rttable(af, w.w_req, w.w_op, w.w_arg);
		break;

	case NET_RT_IFLIST:
		error = sysctl_iflist(af, &w);
		break;
	}
	if (w.w_tmem != NULL)
		kfree(w.w_tmem, M_RTABLE);

	lwkt_migratecpu(origcpu);
	return (error);
}

SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");

/*
 * Definitions of protocols supported in the ROUTE domain.
 */

static struct domain routedomain;		/* or at least forward */

/*
 * Single raw-socket protocol entry for the routing domain; user
 * requests are serviced by route_usrreqs and output is handled by
 * route_output().
 */
static struct protosw routesw[] = {
    {
	.pr_type = SOCK_RAW,
	.pr_domain = &routedomain,
	.pr_protocol = 0,
	.pr_flags = PR_ATOMIC|PR_ADDR,
	.pr_input = NULL,
	.pr_output = route_output,
	.pr_ctlinput = raw_ctlinput,
	.pr_ctloutput = route_ctloutput,
	.pr_ctlport = cpu0_ctlport,

	.pr_init = raw_init,
	.pr_usrreqs = &route_usrreqs
    }
};

/* AF_ROUTE domain descriptor; no route table of its own. */
static struct domain routedomain = {
	.dom_family = AF_ROUTE,
	.dom_name = "route",
	.dom_init = NULL,
	.dom_externalize = NULL,
	.dom_dispose = NULL,
	.dom_protosw = routesw,
	.dom_protoswNPROTOSW = &routesw[(sizeof routesw)/(sizeof routesw[0])],
	.dom_next = SLIST_ENTRY_INITIALIZER,
	.dom_rtattach = NULL,
	.dom_rtoffset = 0,
	.dom_maxrtkey = 0,
	.dom_ifattach = NULL,
	.dom_ifdetach = NULL
};

DOMAIN_SET(route);
