/*	$OpenBSD: ip_input.c,v 1.349 2020/08/01 23:41:55 gnezdo Exp $	*/
/*	$NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include "pf.h"
#include "carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/task.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <net/if_types.h>

#ifdef INET6
#include <netinet6/ip6protosw.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#endif

#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#endif /* IPSEC */

#if NCARP > 0
#include <netinet/ip_carp.h>
#endif

/* values controllable via sysctl */
int	ipforwarding = 0;
int	ipmforwarding = 0;
int	ipmultipath = 0;
int	ipsendredirects = 1;
int	ip_dosourceroute = 0;
int	ip_defttl = IPDEFTTL;
int	ip_mtudisc = 1;
u_int	ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
int	ip_directedbcast = 0;

struct rttimer_queue *ip_mtudisc_timeout_q = NULL;

/* Protects `ipq' and `ip_frags'. */
struct mutex	ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);

/* IP reassembly queue */
LIST_HEAD(, ipq) ipq;

/* Keep track of memory used for reassembly */
int	ip_maxqueue = 300;
int	ip_frags = 0;

int *ipctl_vars[IPCTL_MAXID] = IPCTL_VARS;

struct pool ipqent_pool;
struct pool ipq_pool;

struct cpumem *ipcounters;

int ip_sysctl_ipstat(void *, size_t *, void *);

static struct mbuf_queue	ipsend_mq;

extern struct niqueue		arpinq;

int	ip_ours(struct mbuf **, int *, int, int);
int	ip_dooptions(struct mbuf *, struct ifnet *);
int	in_ouraddr(struct mbuf *, struct ifnet *, struct rtentry **);

static void ip_send_dispatch(void *);
static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);
/*
 * Used to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
struct ip_srcrt {
	int		isr_nhops;		   /* number of hops */
	struct in_addr	isr_dst;		   /* final destination */
	char		isr_nop;		   /* one NOP to align */
	char		isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
	struct in_addr	isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
};

void save_rte(struct mbuf *, u_char *, struct in_addr);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	const struct protosw *pr;
	int i;
	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;

	ipcounters = counters_alloc(ips_ncounters);

	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
	    IPL_SOFTNET, 0, "ipqe", NULL);
	pool_init(&ipq_pool, sizeof(struct ipq), 0,
	    IPL_SOFTNET, 0, "ipq", NULL);

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
		    pr->pr_protocol < IPPROTO_MAX)
			ip_protox[pr->pr_protocol] = pr - inetsw;
	LIST_INIT(&ipq);
	if (ip_mtudisc != 0)
		ip_mtudisc_timeout_q =
		    rt_timer_queue_create(ip_mtudisc_timeout);

	/* Fill in list of ports not to allocate dynamically. */
	memset(&baddynamicports, 0, sizeof(baddynamicports));
	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);

	/* Fill in list of ports only root can bind to. */
	memset(&rootonlyports, 0, sizeof(rootonlyports));
	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
	for (i = 0; defrootonlyports_udp[i] != 0; i++)
		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);

	mq_init(&ipsend_mq, 64, IPL_SOFTNET);

#ifdef IPSEC
	ipsec_init();
#endif
}

/*
 * IPv4 input routine.
 *
 * Checksum and byte swap header.  Process options.  Forward or deliver.
 */
void
ipv4_input(struct ifnet *ifp, struct mbuf *m)
{
	int off, nxt;

	off = 0;
	nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
	KASSERT(nxt == IPPROTO_DONE);
}

int
ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
{
	struct mbuf *m = *mp;
	struct rtentry *rt = NULL;
	struct ip *ip;
	int hlen, len;
	in_addr_t pfrdr = 0;

	KASSERT(*offp == 0);

	ipstat_inc(ips_total);
	if (m->m_len < sizeof (struct ip) &&
	    (m = *mp = m_pullup(m, sizeof (struct ip))) == NULL) {
		ipstat_inc(ips_toosmall);
		goto bad;
	}
	ip = mtod(m, struct ip *);
	if (ip->ip_v != IPVERSION) {
		ipstat_inc(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		ipstat_inc(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = *mp = m_pullup(m, hlen)) == NULL) {
			ipstat_inc(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			ipstat_inc(ips_badaddr);
			goto bad;
		}
	}

	if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
			ipstat_inc(ips_badsum);
			goto bad;
		}

		ipstat_inc(ips_inswcsum);
		if (in_cksum(m, hlen) != 0) {
			ipstat_inc(ips_badsum);
			goto bad;
		}
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Convert fields to host representation.
	 */
	if (len < hlen) {
		ipstat_inc(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat_inc(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else
			m_adj(m, len - m->m_pkthdr.len);
	}

#if NCARP > 0
	if (carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
		goto bad;
#endif

#if NPF > 0
	/*
	 * Packet filter
	 */
	pfrdr = ip->ip_dst.s_addr;
	if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
		goto bad;
	m = *mp;
	if (m == NULL)
		goto bad;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	pfrdr = (pfrdr != ip->ip_dst.s_addr);
#endif

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) {
		m = *mp = NULL;
		goto bad;
	}

	if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	switch (in_ouraddr(m, ifp, &rt)) {
	case 2:
		goto bad;
	case 1:
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	if (IN_MULTICAST(ip->ip_dst.s_addr)) {
		/*
		 * Make sure M_MCAST is set.  It should theoretically
		 * already be there, but let's play safe because upper
		 * layers check for this flag.
		 */
		m->m_flags |= M_MCAST;

#ifdef MROUTING
		if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
			int error;

			if (m->m_flags & M_EXT) {
				if ((m = *mp = m_pullup(m, hlen)) == NULL) {
					ipstat_inc(ips_toosmall);
					goto bad;
				}
				ip = mtod(m, struct ip *);
			}
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 *
			 * (The IP ident field is put in the same byte order
			 * as expected when ip_mforward() is called from
			 * ip_output().)
			 */
			KERNEL_LOCK();
			error = ip_mforward(m, ifp);
			KERNEL_UNLOCK();
			if (error) {
				ipstat_inc(ips_cantforward);
				goto bad;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP) {
				nxt = ip_ours(mp, offp, nxt, af);
				goto out;
			}
			ipstat_inc(ips_forward);
		}
#endif
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		if (!in_hasmulti(&ip->ip_dst, ifp)) {
			ipstat_inc(ips_notmember);
			if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
				ipstat_inc(ips_cantforward);
			goto bad;
		}
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

#if NCARP > 0
	if (ip->ip_p == IPPROTO_ICMP &&
	    carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, 1))
		goto bad;
#endif
	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (ipforwarding == 0) {
		ipstat_inc(ips_cantforward);
		goto bad;
	}
#ifdef IPSEC
	if (ipsec_in_use) {
		int rv;

		rv = ipsec_forward_check(m, hlen, AF_INET);
		if (rv != 0) {
			ipstat_inc(ips_cantforward);
			goto bad;
		}
		/*
		 * Fall through, forward packet. Outbound IPsec policy
		 * checking will occur in ip_output().
		 */
	}
#endif /* IPSEC */

	ip_forward(m, ifp, rt, pfrdr);
	*mp = NULL;
	return IPPROTO_DONE;
bad:
	nxt = IPPROTO_DONE;
	m_freemp(mp);
out:
	rtfree(rt);
	return nxt;
}

/*
 * IPv4 local-delivery routine.
 *
 * If fragmented try to reassemble.  Pass to next level.
 */
int
ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
{
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct ipq *fp;
	struct ipqent *ipqe;
	int mff, hlen;

	hlen = ip->ip_hl << 2;

	/*
	 * If offset or IP_MF are set, must reassemble.
	 * Otherwise, nothing need be done.
	 * (We could look in the reassembly queue to see
	 * if the packet was previously fragmented,
	 * but it's not worth the time; just let them time out.)
	 */
	if (ip->ip_off &~ htons(IP_DF | IP_RF)) {
		if (m->m_flags & M_EXT) {		/* XXX */
			if ((m = *mp = m_pullup(m, hlen)) == NULL) {
				ipstat_inc(ips_toosmall);
				return IPPROTO_DONE;
			}
			ip = mtod(m, struct ip *);
		}

		mtx_enter(&ipq_mutex);

		/*
		 * Look for queue of fragments
		 * of this datagram.
		 */
		LIST_FOREACH(fp, &ipq, ipq_q) {
			if (ip->ip_id == fp->ipq_id &&
			    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
			    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
			    ip->ip_p == fp->ipq_p)
				break;
		}

		/*
		 * Adjust ip_len to not reflect header,
		 * set ipqe_mff if more fragments are expected,
		 * convert offset of this to bytes.
		 */
		ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
		mff = (ip->ip_off & htons(IP_MF)) != 0;
		if (mff) {
			/*
			 * Make sure that fragments have a data length
			 * that's a non-zero multiple of 8 bytes.
			 */
			if (ntohs(ip->ip_len) == 0 ||
			    (ntohs(ip->ip_len) & 0x7) != 0) {
				ipstat_inc(ips_badfrags);
				goto bad;
			}
		}
		ip->ip_off = htons(ntohs(ip->ip_off) << 3);

		/*
		 * If datagram marked as having more fragments
		 * or if this is not the first fragment,
		 * attempt reassembly; if it succeeds, proceed.
		 */
		if (mff || ip->ip_off) {
			ipstat_inc(ips_fragments);
			if (ip_frags + 1 > ip_maxqueue) {
				ip_flush();
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}

			ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
			if (ipqe == NULL) {
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}
			ip_frags++;
			ipqe->ipqe_mff = mff;
			ipqe->ipqe_m = m;
			ipqe->ipqe_ip = ip;
			m = *mp = ip_reass(ipqe, fp);
			if (m == NULL)
				goto bad;
			ipstat_inc(ips_reassembled);
			ip = mtod(m, struct ip *);
			hlen = ip->ip_hl << 2;
			ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
		} else
			if (fp)
				ip_freef(fp);

		mtx_leave(&ipq_mutex);
	}

	*offp = hlen;
	nxt = ip->ip_p;
	/* Check whether we are already in an IPv4/IPv6 local deliver loop. */
	if (af == AF_UNSPEC)
		nxt = ip_deliver(mp, offp, nxt, AF_INET);
	return nxt;
bad:
	mtx_leave(&ipq_mutex);
	m_freemp(mp);
	return IPPROTO_DONE;
}

#ifndef INET6
#define IPSTAT_INC(name)	ipstat_inc(ips_##name)
#else
#define IPSTAT_INC(name)	(af == AF_INET ?	\
	ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
#endif

int
ip_deliver(struct mbuf **mp, int *offp, int nxt, int af)
{
	const struct protosw *psw;
	int naf = af;
#ifdef INET6
	int nest = 0;
#endif /* INET6 */

	/* pf might have modified stuff, might have to chksum */
	switch (af) {
	case AF_INET:
		in_proto_cksum_out(*mp, NULL);
		break;
#ifdef INET6
	case AF_INET6:
		in6_proto_cksum_out(*mp, NULL);
		break;
#endif /* INET6 */
	}

	/*
	 * Tell launch routine the next header
	 */
	IPSTAT_INC(delivered);

	while (nxt != IPPROTO_DONE) {
#ifdef INET6
		if (af == AF_INET6 &&
		    ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
			ip6stat_inc(ip6s_toomanyhdr);
			goto bad;
		}
#endif /* INET6 */

		/*
		 * protection against faulty packet - there should be
		 * more sanity checks in header chain processing.
		 */
		if ((*mp)->m_pkthdr.len < *offp) {
			IPSTAT_INC(tooshort);
			goto bad;
		}

#ifdef INET6
		/* draft-itojun-ipv6-tcp-to-anycast */
		if (af == AF_INET6 &&
		    ISSET((*mp)->m_flags, M_ACAST) && (nxt == IPPROTO_TCP)) {
			if ((*mp)->m_len >= sizeof(struct ip6_hdr)) {
				icmp6_error(*mp, ICMP6_DST_UNREACH,
				    ICMP6_DST_UNREACH_ADDR,
				    offsetof(struct ip6_hdr, ip6_dst));
				*mp = NULL;
			}
			goto bad;
		}
#endif /* INET6 */

#ifdef IPSEC
		if (ipsec_in_use) {
			if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
				IPSTAT_INC(cantforward);
				goto bad;
			}
		}
		/* Otherwise, just fall through and deliver the packet */
#endif /* IPSEC */

		switch (nxt) {
		case IPPROTO_IPV4:
			naf = AF_INET;
			ipstat_inc(ips_delivered);
			break;
#ifdef INET6
		case IPPROTO_IPV6:
			naf = AF_INET6;
			ip6stat_inc(ip6s_delivered);
			break;
#endif /* INET6 */
		}
		switch (af) {
		case AF_INET:
			psw = &inetsw[ip_protox[nxt]];
			break;
#ifdef INET6
		case AF_INET6:
			psw = &inet6sw[ip6_protox[nxt]];
			break;
#endif /* INET6 */
		}
		nxt = (*psw->pr_input)(mp, offp, nxt, af);
		af = naf;
	}
	return nxt;
bad:
	m_freemp(mp);
	return IPPROTO_DONE;
}
#undef IPSTAT_INC

int
in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct rtentry **prt)
{
	struct rtentry *rt;
	struct ip *ip;
	struct sockaddr_in sin;
	int match = 0;

#if NPF > 0
	switch (pf_ouraddr(m)) {
	case 0:
		return (0);
	case 1:
		return (1);
	default:
		/* pf does not know it */
		break;
	}
#endif

	ip = mtod(m, struct ip *);

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr = ip->ip_dst;
	rt = rtalloc_mpath(sintosa(&sin), &ip->ip_src.s_addr,
	    m->m_pkthdr.ph_rtableid);
	if (rtisvalid(rt)) {
		if (ISSET(rt->rt_flags, RTF_LOCAL))
			match = 1;

		/*
		 * If directedbcast is enabled we only consider it local
		 * if it is received on the interface with that address.
		 */
		if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
		    (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) {
			match = 1;

			/* Make sure M_BCAST is set */
			m->m_flags |= M_BCAST;
		}
	}
	*prt = rt;

	if (!match) {
		struct ifaddr *ifa;

		/*
		 * No local address or broadcast address found, so check for
		 * ancient classful broadcast addresses.
		 * It must have been broadcast on the link layer, and for an
		 * address on the interface it was received on.
		 */
		if (!ISSET(m->m_flags, M_BCAST) ||
		    !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
			return (0);

		if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
			return (0);
		/*
		 * The check in the loop assumes you only rx a packet on an UP
		 * interface, and that M_BCAST will only be set on a BROADCAST
		 * interface.
		 */
		NET_ASSERT_LOCKED();
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;

			if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
			    ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
				match = 1;
				break;
			}
		}
	} else if (ipforwarding == 0 && rt->rt_ifidx != ifp->if_index &&
	    !((ifp->if_flags & IFF_LOOPBACK) || (ifp->if_type == IFT_ENC) ||
	    (m->m_pkthdr.pf.flags & PF_TAG_TRANSLATE_LOCALHOST))) {
		/* received on wrong interface. */
#if NCARP > 0
		struct ifnet *out_if;

		/*
		 * Virtual IPs on carp interfaces need to be checked also
		 * against the parent interface and other carp interfaces
		 * sharing the same parent.
		 */
		out_if = if_get(rt->rt_ifidx);
		if (!(out_if && carp_strict_addr_chk(out_if, ifp))) {
			ipstat_inc(ips_wrongif);
			match = 2;
		}
		if_put(out_if);
#else
		ipstat_inc(ips_wrongif);
		match = 2;
#endif
	}

	return (match);
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 */
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp)
{
	struct mbuf *m = ipqe->ipqe_m;
	struct ipqent *nq, *p, *q;
	struct ip *ip;
	struct mbuf *t;
	int hlen = ipqe->ipqe_ip->ip_hl << 2;
	int i, next;
	u_int8_t ecn, ecn0;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = pool_get(&ipq_pool, PR_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
		LIST_INSERT_HEAD(&ipq, fp, ipq_q);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ipqe->ipqe_ip->ip_p;
		fp->ipq_id = ipqe->ipqe_ip->ip_id;
		LIST_INIT(&fp->ipq_fragq);
		fp->ipq_src = ipqe->ipqe_ip->ip_src;
		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
		p = NULL;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
			    IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q))
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ipqe->ipqe_ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ipqe->ipqe_ip->ip_len))
				goto dropfrag;
			m_adj(ipqe->ipqe_m, i);
			ipqe->ipqe_ip->ip_off =
			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
			ipqe->ipqe_ip->ip_len =
			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL &&
	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
	    ntohs(q->ipqe_ip->ip_off); q = nq) {
		i = (ntohs(ipqe->ipqe_ip->ip_off) +
		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
		if (i < ntohs(q->ipqe_ip->ip_len)) {
			q->ipqe_ip->ip_len =
			    htons(ntohs(q->ipqe_ip->ip_len) - i);
			q->ipqe_ip->ip_off =
			    htons(ntohs(q->ipqe_ip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = LIST_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		LIST_REMOVE(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}

insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 */
	if (p == NULL) {
		LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		LIST_INSERT_AFTER(p, ipqe, ipqe_q);
	}
	next = 0;
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q)) {
		if (ntohs(q->ipqe_ip->ip_off) != next)
			return (0);
		next += ntohs(q->ipqe_ip->ip_len);
	}
	if (p->ipqe_mff)
		return (0);

	/*
	 * Reassembly is complete.  Check for a bogus message size and
	 * concatenate fragments.
	 */
	q = LIST_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		ipstat_inc(ips_toolong);
		ip_freef(fp);
		return (0);
	}
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = 0;
	m_cat(m, t);
	nq = LIST_NEXT(q, ipqe_q);
	pool_put(&ipqent_pool, q);
	ip_frags--;
	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = LIST_NEXT(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
		m_removehdr(t);
		m_cat(m, t);
	}

	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons(next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	m_calchdrlen(m);
	return (m);

dropfrag:
	ipstat_inc(ips_fragdropped);
	m_freem(m);
	pool_put(&ipqent_pool, ipqe);
	ip_frags--;
	return (NULL);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
ip_freef(struct ipq *fp)
{
	struct ipqent *q;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
		LIST_REMOVE(q, ipqe_q);
		m_freem(q->ipqe_m);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
ip_slowtimo(void)
{
	struct ipq *fp, *nfp;

	mtx_enter(&ipq_mutex);
	LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
		if (--fp->ipq_ttl == 0) {
			ipstat_inc(ips_fragtimeout);
			ip_freef(fp);
		}
	}
	mtx_leave(&ipq_mutex);
}

/*
 * Flush a bunch of datagram fragments, till we are down to 75%.
 */
void
ip_flush(void)
{
	int max = 50;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
		ipstat_inc(ips_fragdropped);
		ip_freef(LIST_FIRST(&ipq));
	}
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */
int
ip_dooptions(struct mbuf *m, struct ifnet *ifp)
{
	struct ip *ip = mtod(m, struct ip *);
	unsigned int rtableid = m->m_pkthdr.ph_rtableid;
	struct rtentry *rt;
	struct sockaddr_in ipaddr;
	u_char *cp;
	struct ip_timestamp ipt;
	struct in_ifaddr *ia;
	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
	struct in_addr sin, dst;
	u_int32_t ntime;

	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);

	KERNEL_LOCK();
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}

		switch (opt) {

		default:
			break;

		/*
		 * Source routing with record.
		 * Find interface with current destination address.
		 * If none on this machine then drop if strictly routed,
		 * or do nothing if loosely routed.
		 * Record interface address and bring up next address
		 * component.  If strictly routed make sure next
		 * address is on directly accessible net.
		 */
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (!ip_dosourceroute) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			ipaddr.sin_addr = ip->ip_dst;
			ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr),
			    m->m_pkthdr.ph_rtableid));
			if (ia == NULL) {
				if (opt == IPOPT_SSRR) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				}
				/*
				 * Loose routing, and not at next destination
				 * yet; nothing to do except forward.
				 */
				break;
			}
			off--;			/* 0 origin */
			if ((off + sizeof(struct in_addr)) > optlen) {
				/*
				 * End of source route.  Should be for us.
				 */
				save_rte(m, cp, ip->ip_src);
				break;
			}

			/*
			 * locate outgoing interface
			 */
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			memcpy(&ipaddr.sin_addr, cp + off,
			    sizeof(ipaddr.sin_addr));
			/* keep packet in the virtual instance */
			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
			if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) &&
			    ISSET(rt->rt_flags, RTF_GATEWAY))) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				rtfree(rt);
				goto bad;
			}
			ia = ifatoia(rt->rt_ifa);
			memcpy(cp + off, &ia->ia_addr.sin_addr,
			    sizeof(struct in_addr));
			rtfree(rt);
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			ip->ip_dst = ipaddr.sin_addr;
			/*
			 * Let ip_intr's mcast routing check handle mcast pkts
			 */
			forward = !IN_MULTICAST(ip->ip_dst.s_addr);
			break;

		case IPOPT_RR:
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}

			/*
			 * If no space remains, ignore.
			 */
			off--;			/* 0 origin */
			if ((off + sizeof(struct in_addr)) > optlen)
				break;
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			ipaddr.sin_addr = ip->ip_dst;
			/*
			 * locate outgoing interface; if we're the destination,
			 * use the incoming interface (should be same).
			 * Again keep the packet inside the virtual instance.
			 */
			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
			if (!rtisvalid(rt)) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_HOST;
				rtfree(rt);
				goto bad;
			}
			ia = ifatoia(rt->rt_ifa);
			memcpy(cp + off, &ia->ia_addr.sin_addr,
			    sizeof(struct in_addr));
			rtfree(rt);
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			break;

		case IPOPT_TS:
			code = cp - (u_char *)ip;
			if (optlen < sizeof(struct ip_timestamp))
				goto bad;
			memcpy(&ipt, cp, sizeof(struct ip_timestamp));
			if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5)
				goto bad;
			if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) {
				if (++ipt.ipt_oflw == 0)
					goto bad;
				break;
			}
			memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin);
			switch (ipt.ipt_flg) {

			case IPOPT_TS_TSONLY:
				break;

			case IPOPT_TS_TSANDADDR:
				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
				    sizeof(struct in_addr) > ipt.ipt_len)
					goto bad;
				memset(&ipaddr, 0, sizeof(ipaddr));
				ipaddr.sin_family = AF_INET;
				ipaddr.sin_len = sizeof(ipaddr);
				ipaddr.sin_addr = dst;
				ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
				    ifp));
				if (ia == NULL)
					continue;
				memcpy(&sin, &ia->ia_addr.sin_addr,
				    sizeof(struct in_addr));
				ipt.ipt_ptr += sizeof(struct in_addr);
				break;

			case IPOPT_TS_PRESPEC:
				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
				    sizeof(struct in_addr) > ipt.ipt_len)
					goto bad;
				memset(&ipaddr, 0, sizeof(ipaddr));
				ipaddr.sin_family = AF_INET;
				ipaddr.sin_len = sizeof(ipaddr);
				ipaddr.sin_addr = sin;
				if (ifa_ifwithaddr(sintosa(&ipaddr),
				    m->m_pkthdr.ph_rtableid) == NULL)
					continue;
				ipt.ipt_ptr += sizeof(struct in_addr);
				break;

			default:
				/* XXX can't take &ipt->ipt_flg */
				code = (u_char *)&ipt.ipt_ptr -
				    (u_char *)ip + 1;
				goto bad;
			}
			ntime = iptime();
			memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t));
			ipt.ipt_ptr += sizeof(u_int32_t);
		}
	}
	KERNEL_UNLOCK();
	if (forward && ipforwarding) {
		ip_forward(m, ifp, NULL, 1);
		return (1);
	}
	return (0);
bad:
	KERNEL_UNLOCK();
	icmp_error(m, type, code, 0, 0);
	ipstat_inc(ips_badoptions);
	return (1);
}

/*
 * Save incoming source route for use in replies,
 * to be picked up later by ip_srcroute if the receiver is interested.
 */
void
save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
{
	struct ip_srcrt *isr;
	struct m_tag *mtag;
	unsigned olen;

	olen = option[IPOPT_OLEN];
	if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes))
		return;

	mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT);
	if (mtag == NULL)
		return;
	isr = (struct ip_srcrt *)(mtag + 1);

	memcpy(isr->isr_hdr, option, olen);
	isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
	isr->isr_dst = dst;
	m_tag_prepend(m, mtag);
}

/*
 * Retrieve incoming source route for use in replies,
 * in the same form used by setsockopt.
 * The first hop is placed before the options, will be removed later.
 */
struct mbuf *
ip_srcroute(struct mbuf *m0)
{
	struct in_addr *p, *q;
	struct mbuf *m;
	struct ip_srcrt *isr;
	struct m_tag *mtag;

	if (!ip_dosourceroute)
		return (NULL);

	mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL);
	if (mtag == NULL)
		return (NULL);
	isr = (struct ip_srcrt *)(mtag + 1);

	if (isr->isr_nhops == 0)
		return (NULL);
	m = m_get(M_DONTWAIT, MT_SOOPTS);
	if (m == NULL)
		return (NULL);

#define OPTSIZ	(sizeof(isr->isr_nop) + sizeof(isr->isr_hdr))

	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */
	m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ;

	/*
	 * First save first hop for return route
	 */
	p = &(isr->isr_routes[isr->isr_nhops - 1]);
	*(mtod(m, struct in_addr *)) = *p--;

	/*
	 * Copy option fields and padding (nop) to mbuf.
	 */
	isr->isr_nop = IPOPT_NOP;
	isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF;
	memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop,
	    OPTSIZ);
	q = (struct in_addr *)(mtod(m, caddr_t) +
	    sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
	/*
	 * Record return path as an IP source route,
	 * reversing the path (pointers are now aligned).
	 */
	while (p >= isr->isr_routes) {
		*q++ = *p--;
	}
	/*
	 * Last hop goes to final destination.
	 */
	*q = isr->isr_dst;
	m_tag_delete(m0, (struct m_tag *)isr);
	return (m);
}

/*
 * Strip out IP options, at higher level protocol in the kernel.
 */
void
ip_stripoptions(struct mbuf *m)
{
	int i;
	struct ip *ip = mtod(m, struct ip *);
	caddr_t opts;
	int olen;

	olen = (ip->ip_hl<<2) - sizeof (struct ip);
	opts = (caddr_t)(ip + 1);
	i = m->m_len - (sizeof (struct ip) + olen);
	memmove(opts, opts + olen, i);
	m->m_len -= olen;
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= olen;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_len = htons(ntohs(ip->ip_len) - olen);
}

const u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		0,		0,
	ENOPROTOOPT
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt, int srcrt)
{
	struct mbuf mfake, *mcopy = NULL;
	struct ip *ip = mtod(m, struct ip *);
	struct sockaddr_in *sin;
	struct route ro;
	int error, type = 0, code = 0, destmtu = 0, fake = 0, len;
	u_int32_t dest;

	dest = 0;
	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		ipstat_inc(ips_cantforward);
		m_freem(m);
		goto freecopy;
	}
	if (ip->ip_ttl <= IPTTLDEC) {
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
		goto freecopy;
	}

	sin = satosin(&ro.ro_dst);
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = ip->ip_dst;

	if (!rtisvalid(rt)) {
		rtfree(rt);
		rt = rtalloc_mpath(sintosa(sin), &ip->ip_src.s_addr,
		    m->m_pkthdr.ph_rtableid);
		if (rt == NULL) {
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
			return;
		}
	}

	/*
	 * Save at most 68 bytes of the packet in case
	 * we need to generate an ICMP message to the src.
	 * The data is saved in the mbuf on the stack that
	 * acts as a temporary storage not intended to be
	 * passed down the IP stack or to the mfree.
	 */
	memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr));
	mfake.m_type = m->m_type;
	if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) {
		mfake.m_data = mfake.m_pktdat;
		len = min(ntohs(ip->ip_len), 68);
		m_copydata(m, 0, len, mfake.m_pktdat);
		mfake.m_pkthdr.len = mfake.m_len = len;
#if NPF > 0
		pf_pkt_addr_changed(&mfake);
#endif /* NPF > 0 */
		fake = 1;
	}

	ip->ip_ttl -= IPTTLDEC;

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 * Don't send redirect if we advertise destination's arp address
	 * as ours (proxy arp).
	 */
	if ((rt->rt_ifidx == ifp->if_index) &&
	    (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
	    satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
	    ipsendredirects && !srcrt &&
	    !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) {
		if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
		    ifatoia(rt->rt_ifa)->ia_net) {
			if (rt->rt_flags & RTF_GATEWAY)
				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
			else
				dest = ip->ip_dst.s_addr;
			/* Router requirements says to only send host redirects */
			type = ICMP_REDIRECT;
			code = ICMP_REDIRECT_HOST;
		}
	}

	ro.ro_rt = rt;
	ro.ro_tableid = m->m_pkthdr.ph_rtableid;
	error = ip_output(m, NULL, &ro,
	    (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
	    NULL, NULL, 0);
	rt = ro.ro_rt;
	if (error)
		ipstat_inc(ips_cantforward);
	else {
		ipstat_inc(ips_forward);
		if (type)
			ipstat_inc(ips_redirectsent);
		else
			goto freecopy;
	}
	if (!fake)
		goto freecopy;

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		if (rt != NULL) {
			if (rt->rt_mtu)
				destmtu = rt->rt_mtu;
			else {
				struct ifnet *destifp;

				destifp = if_get(rt->rt_ifidx);
				if (destifp != NULL)
					destmtu = destifp->if_mtu;
				if_put(destifp);
			}
		}
#endif /*IPSEC*/
		ipstat_inc(ips_cantfrag);
		break;

	case EACCES:
		/*
		 * pf(4) blocked the packet. There is no need to send an ICMP
		 * packet back since pf(4) takes care of it.
		 */
		goto freecopy;
	case ENOBUFS:
		/*
		 * a router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * source quench could be a big problem under DoS attacks,
		 * or the underlying interface is rate-limited.
		 */
		goto freecopy;
	}

	mcopy = m_copym(&mfake, 0, len, M_DONTWAIT);
	if (mcopy)
		icmp_error(mcopy, type, code, dest, destmtu);

freecopy:
	if (fake)
		m_tag_delete_chain(&mfake);
	rtfree(rt);
}

int
ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int error;
#ifdef MROUTING
	extern int ip_mrtproto;
	extern struct mrtstat mrtstat;
#endif

	/* Almost all sysctl names at this level are terminal. */
	if (namelen != 1 && name[0] != IPCTL_IFQUEUE &&
	    name[0] != IPCTL_ARPQUEUE)
		return (ENOTDIR);

	switch (name[0]) {
	case IPCTL_SOURCEROUTE:
		/*
		 * Don't allow this to change in a secure environment.
		 */
		if (newp && securelevel > 0)
			return (EPERM);
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_dosourceroute);
		NET_UNLOCK();
		return (error);
	case IPCTL_MTUDISC:
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_mtudisc);
		if (ip_mtudisc != 0 && ip_mtudisc_timeout_q == NULL) {
			ip_mtudisc_timeout_q =
			    rt_timer_queue_create(ip_mtudisc_timeout);
		} else if (ip_mtudisc == 0 && ip_mtudisc_timeout_q != NULL) {
			rt_timer_queue_destroy(ip_mtudisc_timeout_q);
			ip_mtudisc_timeout_q = NULL;
		}
		NET_UNLOCK();
		return error;
	case IPCTL_MTUDISCTIMEOUT:
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_mtudisc_timeout);
		if (ip_mtudisc_timeout_q != NULL)
			rt_timer_queue_change(ip_mtudisc_timeout_q,
			    ip_mtudisc_timeout);
		NET_UNLOCK();
		return (error);
#ifdef IPSEC
	case IPCTL_ENCDEBUG:
	case IPCTL_IPSEC_STATS:
	case IPCTL_IPSEC_EXPIRE_ACQUIRE:
	case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT:
	case IPCTL_IPSEC_REQUIRE_PFS:
	case IPCTL_IPSEC_SOFT_ALLOCATIONS:
	case IPCTL_IPSEC_ALLOCATIONS:
	case IPCTL_IPSEC_SOFT_BYTES:
	case IPCTL_IPSEC_BYTES:
	case IPCTL_IPSEC_TIMEOUT:
	case IPCTL_IPSEC_SOFT_TIMEOUT:
	case IPCTL_IPSEC_SOFT_FIRSTUSE:
	case IPCTL_IPSEC_FIRSTUSE:
	case IPCTL_IPSEC_ENC_ALGORITHM:
	case IPCTL_IPSEC_AUTH_ALGORITHM:
	case IPCTL_IPSEC_IPCOMP_ALGORITHM:
		return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp,
		    newlen));
#endif
	case IPCTL_IFQUEUE:
		return (EOPNOTSUPP);
	case IPCTL_ARPQUEUE:
		return (sysctl_niq(name + 1, namelen - 1,
		    oldp, oldlenp, newp, newlen, &arpinq));
	case IPCTL_STATS:
		return (ip_sysctl_ipstat(oldp, oldlenp, newp));
#ifdef MROUTING
	case IPCTL_MRTSTATS:
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &mrtstat, sizeof(mrtstat)));
	case IPCTL_MRTPROTO:
		return (sysctl_rdint(oldp, oldlenp, newp, ip_mrtproto));
	case IPCTL_MRTMFC:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_mfc(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
	case IPCTL_MRTVIF:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_vif(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
#else
	case IPCTL_MRTPROTO:
	case IPCTL_MRTSTATS:
	case IPCTL_MRTMFC:
	case IPCTL_MRTVIF:
		return (EOPNOTSUPP);
#endif
	default:
		NET_LOCK();
		error = sysctl_int_arr(ipctl_vars, nitems(ipctl_vars), name,
		    namelen, oldp, oldlenp, newp, newlen);
		NET_UNLOCK();
		return (error);
	}
	/* NOTREACHED */
}

int
ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
{
	uint64_t counters[ips_ncounters];
	struct ipstat ipstat;
	u_long *words = (u_long *)&ipstat;
	int i;

	CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
	memset(&ipstat, 0, sizeof ipstat);
	counters_read(ipcounters, counters, nitems(counters));

	for (i = 0; i < nitems(counters); i++)
		words[i] = (u_long)counters[i];

	return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		m_microtime(m, &tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}

	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* this code is broken and will probably never be fixed. */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct sockaddr_dl sdl;
		struct ifnet *ifp;

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL || ifp->if_sadl == NULL) {
			memset(&sdl, 0, sizeof(sdl));
			sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl.sdl_family = AF_LINK;
			sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
			sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
			*mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
			    IP_RECVIF, IPPROTO_IP);
		} else {
			*mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
			    ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
		}
		if (*mp)
			mp = &(*mp)->m_next;
		if_put(ifp);
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVRTABLE) {
		u_int rtableid = inp->inp_rtableid;

#if NPF > 0
		if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			struct pf_divert *divert;

			divert = pf_find_divert(m);
			KASSERT(divert != NULL);
			rtableid = divert->rdomain;
		}
#endif

		*mp = sbcreatecontrol((caddr_t) &rtableid,
		    sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

void
ip_send_dispatch(void *xmq)
{
	struct mbuf_queue *mq = xmq;
	struct mbuf *m;
	struct mbuf_list ml;

	mq_delist(mq, &ml);
	if (ml_empty(&ml))
		return;

	NET_LOCK();
	while ((m = ml_dequeue(&ml)) != NULL) {
		ip_output(m, NULL, NULL, 0, NULL, NULL, 0);
	}
	NET_UNLOCK();
}

void
ip_send(struct mbuf *m)
{
	mq_enqueue(&ipsend_mq, m);
	task_add(net_tq(0), &ipsend_task);
}