1 /* 2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1993 36 * The Regents of the University of California. All rights reserved. 
37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/netinet/ip_input.c,v 1.130.2.52 2003/03/07 07:01:28 silby Exp $
 */

#define	_IP_VHL

#include "opt_bootp.h"
#include "opt_ipdn.h"
#include "opt_ipdivert.h"
#include "opt_ipstealth.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/globaldata.h>
#include <sys/thread.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/in_cksum.h>
#include <sys/lock.h>

#include <sys/mplock2.h>

#include <machine/stdarg.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/pfil.h>
#include <net/route.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_divert.h>
#include <netinet/ip_flow.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>

#include <sys/socketvar.h>

#include <net/ipfw/ip_fw.h>
#include <net/dummynet/ip_dummynet.h>

/*
 * RSVP state.  rsvp_on enables the "greedy RSVP" grab in ip_input();
 * ip_rsvpd is presumably the RSVP daemon's socket — the ip_rsvp*
 * management routines are not in view here.
 */
int rsvp_on = 0;
static int ip_rsvp_on;
struct socket *ip_rsvpd;

int ipforwarding = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &ipforwarding, 0, "Enable IP forwarding between interfaces");

static int ipsendredirects = 1; /* XXX */
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &ipsendredirects, 0, "Enable sending IP redirects");

int ip_defttl = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &ip_defttl, 0, "Maximum TTL on IP packets");

static int ip_dosourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW,
    &ip_dosourceroute, 0, "Enable forwarding source routed IP packets");

static int ip_acceptsourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
    CTLFLAG_RW, &ip_acceptsourceroute, 0,
    "Enable accepting source routed IP packets");

/* Reassembly limits; both are sized at boot in ip_init(). */
static int maxnipq;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
    &maxnipq, 0,
    "Maximum number of IPv4 fragment reassembly queue entries");

static int maxfragsperpacket;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &maxfragsperpacket, 0,
    "Maximum number of IPv4 fragments allowed per packet");

static int ip_sendsourcequench = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

int ip_do_randomid = 1;
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &ip_do_randomid, 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static int ip_checkinterface = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &ip_checkinterface, 0, "Verify packet arrives on correct interface");

static u_long ip_hash_count = 0;
SYSCTL_ULONG(_net_inet_ip, OID_AUTO, hash_count, CTLFLAG_RD,
    &ip_hash_count, 0, "Number of packets hashed by IP");

#ifdef RSS_DEBUG
static u_long ip_rehash_count = 0;
SYSCTL_ULONG(_net_inet_ip, OID_AUTO, rehash_count, CTLFLAG_RD,
    &ip_rehash_count, 0, "Number of packets rehashed by IP");

static u_long ip_dispatch_fast = 0;
SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_fast_count, CTLFLAG_RD,
    &ip_dispatch_fast, 0, "Number of packets handled on current CPU");

static u_long ip_dispatch_slow = 0;
SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_slow_count, CTLFLAG_RD,
    &ip_dispatch_slow, 0, "Number of packets messaged to another CPU");
#endif

#ifdef DIAGNOSTIC
static int ipprintfs = 0;
#endif

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];		/* protocol -> inetsw[] index */
struct	in_ifaddrhead in_ifaddrheads[MAXCPU];	/* first inet address */
struct	in_ifaddrhashhead *in_ifaddrhashtbls[MAXCPU];
						/* inet addr hash table */
u_long	in_ifaddrhmask;			/* mask for hash table */

/* Per-cpu preallocated mbuf header template, filled in by ip_init(). */
static struct mbuf *ipforward_mtemp[MAXCPU];

struct ip_stats ipstats_percpu[MAXCPU] __cachealign;

/*
 * Sysctl handler for net.inet.ip.stats: copies out, and accepts new
 * values for, the per-cpu IP statistics — one struct ip_stats for each
 * netisr cpu, in cpu order.
 */
static int
sysctl_ipstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &ipstats_percpu[cpu],
					sizeof(struct ip_stats))))
			break;
		if ((error = SYSCTL_IN(req, &ipstats_percpu[cpu],
				       sizeof(struct ip_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_ipstats, "S,ip_stats", "IP statistics");

/* Packet reassembly stuff */
#define	IPREASS_NHASH_LOG2	6
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)
/* Bucket index from low nibbles of the source address XORed with ip_id. */
#define	IPREASS_HASH(x,y) \
	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)

TAILQ_HEAD(ipqhead, ipq);

/*
 * Per-cpu IPv4 fragment reassembly state: the hash bucket heads plus
 * the callout and netmsgs used to run timeout and drain processing in
 * the owning netisr thread (see ip_init()).
 */
struct ipfrag_queue {
	int			nipq;		/* # of reassembly queues */
	volatile int		draining;
	struct netmsg_base	timeo_netmsg;
	struct callout		timeo_ch;
	struct netmsg_base	drain_netmsg;
	struct ipqhead		ipq[IPREASS_NHASH];
} __cachealign;

static struct ipfrag_queue ipfrag_queue_pcpu[MAXCPU];

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
static int ipstealth = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, &ipstealth, 0, "");
#else
/* const so the compiler can elide the stealth branches entirely */
static const int ipstealth = 0;
#endif

struct mbuf *(*ip_divert_p)(struct mbuf *, int, int);

struct pfil_head inet_pfil_hook;

/*
 * struct ip_srcrt_opt is used to store packet state while it travels
 * through the stack.
 *
 * XXX Note that the code even makes assumptions on the size and
 * alignment of fields inside struct ip_srcrt so e.g. adding some
 * fields will break the code.  This needs to be fixed.
 *
 * We need to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
struct ip_srcrt {
	struct in_addr dst;			/* final destination */
	char nop;				/* one NOP to align */
	char srcopt[IPOPT_OFFSET + 1];		/* OPTVAL, OLEN and OFFSET */
	struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
};

struct ip_srcrt_opt {
	int		ip_nhops;
	struct ip_srcrt	ip_srcrt;
};

#define	IPFRAG_MPIPE_MAX	4096
#define	MAXIPFRAG_MIN		((IPFRAG_MPIPE_MAX * 2) / 256)

/* Reassembly timeout callout period; PR_SLOWHZ ticks per second. */
#define	IPFRAG_TIMEO		(hz / PR_SLOWHZ)

static MALLOC_DEFINE(M_IPQ, "ipq", "IP Fragment Management");
static struct malloc_pipe ipq_mpipe;

static void		save_rte(struct mbuf *, u_char *, struct in_addr);
static int		ip_dooptions(struct mbuf *m, int, struct sockaddr_in *);
static void		ip_freef(struct ipfrag_queue *, struct ipqhead *,
			    struct ipq *);
static void		ip_input_handler(netmsg_t);

static void		ipfrag_timeo_dispatch(netmsg_t);
static void		ipfrag_timeo(void *);
static void		ipfrag_drain_dispatch(netmsg_t);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct ipfrag_queue *fragq;
	struct protosw *pr;
	int cpu, i;

	/*
	 * Make sure we can handle a reasonable number of fragments but
	 * cap it at IPFRAG_MPIPE_MAX.
	 */
	mpipe_init(&ipq_mpipe, M_IPQ, sizeof(struct ipq),
	    IFQ_MAXLEN, IPFRAG_MPIPE_MAX, 0, NULL, NULL, NULL);

	/*
	 * Make in_ifaddrhead and in_ifaddrhashtbl available on all CPUs,
	 * since they could be accessed by any threads.
	 */
	for (cpu = 0; cpu < ncpus; ++cpu) {
		TAILQ_INIT(&in_ifaddrheads[cpu]);
		in_ifaddrhashtbls[cpu] =
		    hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask);
	}

	/*
	 * Default every protocol slot to raw IP, then overwrite the
	 * slots for protocols that have real handlers in inetsw[].
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	for (pr = inetdomain.dom_protosw;
	     pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol) {
			if (pr->pr_protocol != IPPROTO_RAW)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}
	}

	inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&inet_pfil_hook)) != 0) {
		kprintf("%s: WARNING: unable to register pfil hook, "
			"error %d\n", __func__, i);
	}

	/* Size the reassembly limits from available cluster mbufs. */
	maxnipq = (nmbclusters / 32) / netisr_ncpus;
	if (maxnipq < MAXIPFRAG_MIN)
		maxnipq = MAXIPFRAG_MIN;
	maxfragsperpacket = 16;

	ip_id = time_second & 0xffff;	/* time_second survives reboots */

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		/*
		 * Initialize IP statistics counters for each CPU.
		 */
		bzero(&ipstats_percpu[cpu], sizeof(struct ip_stats));

		/*
		 * Preallocate mbuf template for forwarding
		 */
		MGETHDR(ipforward_mtemp[cpu], M_WAITOK, MT_DATA);

		/*
		 * Initialize per-cpu ip fragments queues
		 */
		fragq = &ipfrag_queue_pcpu[cpu];
		for (i = 0; i < IPREASS_NHASH; i++)
			TAILQ_INIT(&fragq->ipq[i]);

		callout_init_mp(&fragq->timeo_ch);
		netmsg_init(&fragq->timeo_netmsg, NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, ipfrag_timeo_dispatch);
		netmsg_init(&fragq->drain_netmsg, NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, ipfrag_drain_dispatch);
	}

	/* ip_hashfn characterizes packets for netisr cpu selection. */
	netisr_register(NETISR_IP, ip_input_handler, ip_hashfn);
	netisr_register_hashcheck(NETISR_IP, ip_hashcheck);

	/* Start the per-cpu fragment timeout callouts on their cpus. */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		fragq = &ipfrag_queue_pcpu[cpu];
		callout_reset_bycpu(&fragq->timeo_ch, IPFRAG_TIMEO,
		    ipfrag_timeo, NULL, cpu);
	}

	ip_porthash_trycount = 2 * netisr_ncpus;
}

/* Do transport protocol processing. */
static void
transport_processing_oncpu(struct mbuf *m, int hlen, struct ip *ip)
{
	const struct protosw *pr = &inetsw[ip_protox[ip->ip_p]];

	/*
	 * Switch out to protocol's input routine.
	 */
	PR_GET_MPLOCK(pr);
	pr->pr_input(&m, &hlen, ip->ip_p);
	PR_REL_MPLOCK(pr);
}

/*
 * Netmsg handler used when a packet was redispatched to another
 * protocol thread (see ip_transport_redispatch()); unpacks the mbuf
 * and header length from the message and runs protocol input.
 */
static void
transport_processing_handler(netmsg_t msg)
{
	struct netmsg_packet *pmsg = &msg->packet;
	struct ip *ip;
	int hlen;

	ip = mtod(pmsg->nm_packet, struct ip *);
	hlen = pmsg->base.lmsg.u.ms_result;

	transport_processing_oncpu(pmsg->nm_packet, hlen, ip);
	/* msg was embedded in the mbuf, do not reply! */
}

/* Netisr entry point registered in ip_init(). */
static void
ip_input_handler(netmsg_t msg)
{
	ip_input(msg->packet.nm_packet);
	/* msg was embedded in the mbuf, do not reply! */
}

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip;
	struct in_ifaddr *ia = NULL;
	struct in_ifaddr_container *iac;
	int hlen, checkif;
	u_short sum;
	struct in_addr pkt_dst;
	boolean_t using_srcrt = FALSE;		/* forward (by PFIL_HOOKS) */
	struct in_addr odst;			/* original dst address(NAT) */
	struct m_tag *mtag;
	struct sockaddr_in *next_hop = NULL;
	lwkt_port_t port;

	ASSERT_NETISR_NCPUS(mycpuid);
	M_ASSERTPKTHDR(m);

	/* Callers must have pulled up at least the fixed IP header. */
	if (m->m_len < sizeof(struct ip)) {
		kprintf("Issuer to ip_input failed to check IP header atomicy (%d)\n",
			m->m_len);
		ipstat.ips_badlen++;
		goto bad;
	}
#if 0
	/* length checks already done in ip_hashfn() */
	KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf"));
#endif

	/*
	 * This routine is called from numerous places which may not have
	 * characterized the packet.
	 */
	ip = mtod(m, struct ip *);
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) {
		/*
		 * Force hash recalculation for fragments and multicast
		 * packets; hardware may not do it correctly.
		 * XXX add flag to indicate the hash is from hardware
		 */
		m->m_flags &= ~M_HASH;
	}
	if ((m->m_flags & M_HASH) == 0) {
		ip_hashfn(&m, 0);
		if (m == NULL)
			return;
		KKASSERT(m->m_flags & M_HASH);

		/* If the hash maps to another netisr, requeue there. */
		if (&curthread->td_msgport !=
		    netisr_hashport(m->m_pkthdr.hash)) {
			netisr_queue(NETISR_IP, m);
			/* Requeued to other netisr msgport; done */
			return;
		}

		/* mbuf could have been changed */
		ip = mtod(m, struct ip *);
	}

	/*
	 * Pull out certain tags
	 */
	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		/* Next hop */
		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}

	if (m->m_pkthdr.fw_flags &
	    (DUMMYNET_MBUF_TAGGED | IPFW_MBUF_CONTINUE)) {
		/*
		 * - Dummynet already filtered this packet.
		 * - This packet was processed by ipfw on another
		 *   cpu, and the rest of the ipfw processing should
		 *   be carried out on this cpu.
		 *
		 * ip_len/ip_off are still in network order for these
		 * packets; convert to host order before re-entering
		 * the firewall section.
		 */
		ip = mtod(m, struct ip *);
		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		goto iphack;
	}

	ipstat.ips_total++;

	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	/* length checks already done in ip_hashfn() */
	KASSERT(hlen >= sizeof(struct ip), ("IP header len too small"));
	KASSERT(m->m_len >= hlen, ("complete IP header not in one mbuf"));

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK)) {
			ipstat.ips_badaddr++;
			goto bad;
		}
	}

	/* Use the hardware-verified checksum result when available. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip))
			sum = in_cksum_hdr(ip);
		else
			sum = in_cksum(m, hlen);
	}
	if (sum != 0) {
		ipstat.ips_badsum++;
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
		/* packet is dropped by traffic conditioner */
		return;
	}
#endif
	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	ip->ip_off = ntohs(ip->ip_off);

	/* length checks already done in ip_hashfn() */
	KASSERT(ip->ip_len >= hlen, ("total length less than header length"));
	KASSERT(m->m_pkthdr.len >= ip->ip_len, ("mbuf too short"));

	/*
	 * Trim mbufs if longer than the IP header would have us expect.
	 */
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else {
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
		}
	}

	/*
	 * IpHack's section.
	 * Right now when no processing on packet has done
	 * and it is still fresh out of network we do our black
	 * deals with it.
	 * - Firewall: deny/allow/divert
	 * - Xlate: translate packet's addr/port (NAT).
	 * - Pipe: pass pkt through dummynet.
	 * - Wrap: fake packet's addr/port <unimpl.>
	 * - Encapsulate: put it in another IP and send out. <unimp.>
	 */

iphack:
	/*
	 * If we've been forwarded from the output side, then
	 * skip the firewall a second time
	 */
	if (next_hop != NULL)
		goto ours;

	/* No pfil hooks */
	if (!pfil_has_hooks(&inet_pfil_hook)) {
		if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
			/*
			 * Strip dummynet tags from stranded packets
			 */
			mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
			KKASSERT(mtag != NULL);
			m_tag_delete(m, mtag);
			m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
		}
		goto pass;
	}

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NOTE!  If the packet is rewritten pf/ipfw/whoever must
	 *	  clear M_HASH.
	 */
	odst = ip->ip_dst;
	if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN))
		return;
	if (m == NULL)	/* consumed by filter */
		return;
	ip = mtod(m, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	/* A changed destination implies 'ipfw fwd'-style redirection. */
	using_srcrt = (odst.s_addr != ip->ip_dst.s_addr);

	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}
	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Queue to dummynet; it owns the mbuf from here on. */
		ip_dn_queue(m);
		return;
	}
	if (m->m_pkthdr.fw_flags & FW_MBUF_REDISPATCH)
		m->m_pkthdr.fw_flags &= ~FW_MBUF_REDISPATCH;
	if (m->m_pkthdr.fw_flags & IPFW_MBUF_CONTINUE) {
		/* ipfw was disabled/unloaded. */
		goto bad;
	}
pass:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, next_hop))
		return;

	/* greedy RSVP, snatches any PATH packet of the RSVP protocol and no
	 * matter if it is destined to another node, or whether it is
	 * a multicast one, RSVP wants it! and prevents it from being forwarded
	 * anywhere else. Also checks if the rsvp daemon is running before
	 * grabbing the packet.
	 */
	if (rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&in_ifaddrheads[mycpuid]) &&
	    !(m->m_flags & (M_MCAST | M_BCAST)))
		goto ours;

	/*
	 * Cache the destination address of the packet; this may be
	 * changed by use of 'ipfw fwd'.
	 */
	pkt_dst = next_hop ? next_hop->sin_addr : ip->ip_dst;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 */
	checkif = ip_checkinterface &&
		  !ipforwarding &&
		  m->m_pkthdr.rcvif != NULL &&
		  !(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
		  next_hop == NULL;

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	LIST_FOREACH(iac, INADDR_HASH(pkt_dst.s_addr), ia_hash) {
		ia = iac->ia;

		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
		    (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
			goto ours;
	}
	ia = NULL;

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (m->m_pkthdr.rcvif != NULL &&
	    m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &m->m_pkthdr.rcvif->if_addrheads[mycpuid],
			      ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr == NULL) /* shutdown/startup race */
				continue;
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    pkt_dst.s_addr)
				goto ours;
			if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr)
				goto ours;
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
				goto ours;
#endif
		}
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		struct in_multi *inm;

		if (ip_mrouter != NULL) {
			/* XXX Multicast routing is not MPSAFE yet */
			get_mplock();

			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward != NULL &&
			    ip_mforward(ip, m->m_pkthdr.rcvif, m, NULL) != 0) {
				rel_mplock();
				ipstat.ips_cantforward++;
				m_freem(m);
				return;
			}

			rel_mplock();

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			ipstat.ips_forward++;
		}
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		inm = IN_LOOKUP_MULTI(&ip->ip_dst, m->m_pkthdr.rcvif);
		if (inm == NULL) {
			ipstat.ips_notmember++;
			m_freem(m);
			return;
		}
		goto ours;
	}
	if (ip->ip_dst.s_addr == INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (!ipforwarding) {
		ipstat.ips_cantforward++;
		m_freem(m);
	} else {
		ip_forward(m, using_srcrt, next_hop);
	}
	return;

ours:

	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (ipstealth &&
	    hlen > sizeof(struct ip) &&
	    ip_dooptions(m, 1, next_hop))
		return;

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		IFA_STAT_INC(&ia->ia_ifa, ipackets, 1);
		IFA_STAT_INC(&ia->ia_ifa, ibytes, m->m_pkthdr.len);
	}

	/*
	 * If offset or IP_MF are set, must reassemble.
	 * Otherwise, nothing need be done.
	 * (We could look in the reassembly queue to see
	 * if the packet was previously fragmented,
	 * but it's not worth the time; just let them time out.)
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		/*
		 * Attempt reassembly; if it succeeds, proceed.  ip_reass()
		 * will return a different mbuf.
		 *
		 * NOTE: ip_reass() returns m with M_HASH cleared to force
		 *	 us to recharacterize the packet.
		 */
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);

		/* Get the header length of the reassembled packet */
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	} else {
		/* ip_reass() strips the header from ip_len; match that. */
		ip->ip_len -= hlen;
	}

	/*
	 * We must forward the packet to the correct protocol thread if
	 * we are not already in it.
	 *
	 * NOTE: ip_len is now in host form.  ip_len is not adjusted
	 *	 further for protocol processing, instead we pass hlen
	 *	 to the protosw and let it deal with it.
	 */
	ipstat.ips_delivered++;

	if ((m->m_flags & M_HASH) == 0) {
		m = ip_rehashm(m, hlen);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
	}
	port = netisr_hashport(m->m_pkthdr.hash);

	if (port != &curthread->td_msgport) {
		ip_transport_redispatch(port, m, hlen);
	} else {
#ifdef RSS_DEBUG
		atomic_add_long(&ip_dispatch_fast, 1);
#endif
		transport_processing_oncpu(m, hlen, ip);
	}
	return;

bad:
	m_freem(m);
}

/*
 * Recompute the netisr hash of a packet whose M_HASH flag was cleared
 * (e.g. after reassembly).  ip_len/ip_off are temporarily converted
 * back to network byte order because ip_hashfn() expects a wire-format
 * header, then restored to host order (with the header length removed
 * from ip_len again) afterwards.  Returns NULL if the mbuf was
 * consumed by ip_hashfn().
 */
struct mbuf *
ip_rehashm(struct mbuf *m, int hlen)
{
	struct ip *ip = mtod(m, struct ip *);

#ifdef RSS_DEBUG
	atomic_add_long(&ip_rehash_count, 1);
#endif
	ip->ip_len = htons(ip->ip_len + hlen);
	ip->ip_off = htons(ip->ip_off);

	ip_hashfn(&m, 0);
	if (m == NULL)
		return NULL;

	/* 'm' might be changed by ip_hashfn(). */
	ip = mtod(m, struct ip *);
	ip->ip_len = ntohs(ip->ip_len) - hlen;
	ip->ip_off = ntohs(ip->ip_off);
	KASSERT(m->m_flags & M_HASH, ("no hash"));

	return (m);
}

/*
 * Hand a packet off to the protocol thread owning 'port'.  The netmsg
 * is embedded in the mbuf header itself (m_hdr.mh_netmsg), so no
 * allocation is required; hlen rides in the message's ms_result field
 * and is recovered by transport_processing_handler().
 */
void
ip_transport_redispatch(struct lwkt_port *port, struct mbuf *m, int hlen)
{
	struct netmsg_packet *pmsg;

#ifdef RSS_DEBUG
	atomic_add_long(&ip_dispatch_slow, 1);
#endif

	pmsg = &m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
	    0, transport_processing_handler);
	pmsg->nm_packet = m;
	pmsg->base.lmsg.u.ms_result = hlen;
	lwkt_sendmsg(port, &pmsg->base.lmsg);
}

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If a chain for reassembly of this datagram already
 * exists, then it is given as fp; otherwise have to make a chain.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
	struct ip *ip = mtod(m, struct ip *);
	struct mbuf *p = NULL, *q, *nq;
	struct mbuf *n;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	int i, next;
	u_short sum;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (maxnipq == 0 || maxfragsperpacket == 0) {
		ipstat.ips_fragments++;
		ipstat.ips_fragdropped++;
		m_freem(m);
		return NULL;
	}

	sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	/*
	 * Look for queue of fragments of this datagram.
	 */
	head = &fragq->ipq[sum];
	TAILQ_FOREACH(fp, head, ipq_list) {
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
		    ip->ip_p == fp->ipq_p)
			goto found;
	}

	fp = NULL;

	/*
	 * Enforce upper bound on number of fragmented packets
	 * for which we attempt reassembly;
	 * If maxnipq is -1, accept all fragments without limitation.
984 */ 985 if (fragq->nipq > maxnipq && maxnipq > 0) { 986 /* 987 * drop something from the tail of the current queue 988 * before proceeding further 989 */ 990 struct ipq *q = TAILQ_LAST(head, ipqhead); 991 if (q == NULL) { 992 /* 993 * The current queue is empty, 994 * so drop from one of the others. 995 */ 996 for (i = 0; i < IPREASS_NHASH; i++) { 997 struct ipq *r = TAILQ_LAST(&fragq->ipq[i], 998 ipqhead); 999 if (r) { 1000 ipstat.ips_fragtimeout += r->ipq_nfrags; 1001 ip_freef(fragq, &fragq->ipq[i], r); 1002 break; 1003 } 1004 } 1005 } else { 1006 ipstat.ips_fragtimeout += q->ipq_nfrags; 1007 ip_freef(fragq, head, q); 1008 } 1009 } 1010 found: 1011 /* 1012 * Adjust ip_len to not reflect header, 1013 * convert offset of this to bytes. 1014 */ 1015 ip->ip_len -= hlen; 1016 if (ip->ip_off & IP_MF) { 1017 /* 1018 * Make sure that fragments have a data length 1019 * that's a non-zero multiple of 8 bytes. 1020 */ 1021 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) { 1022 ipstat.ips_toosmall++; /* XXX */ 1023 m_freem(m); 1024 goto done; 1025 } 1026 m->m_flags |= M_FRAG; 1027 } else { 1028 m->m_flags &= ~M_FRAG; 1029 } 1030 ip->ip_off <<= 3; 1031 1032 ipstat.ips_fragments++; 1033 m->m_pkthdr.header = ip; 1034 1035 /* 1036 * If the hardware has not done csum over this fragment 1037 * then csum_data is not valid at all. 1038 */ 1039 if ((m->m_pkthdr.csum_flags & (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID)) 1040 == (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID)) { 1041 m->m_pkthdr.csum_data = 0; 1042 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1043 } 1044 1045 /* 1046 * Presence of header sizes in mbufs 1047 * would confuse code below. 1048 */ 1049 m->m_data += hlen; 1050 m->m_len -= hlen; 1051 1052 /* 1053 * If first fragment to arrive, create a reassembly queue. 
1054 */ 1055 if (fp == NULL) { 1056 if ((fp = mpipe_alloc_nowait(&ipq_mpipe)) == NULL) 1057 goto dropfrag; 1058 TAILQ_INSERT_HEAD(head, fp, ipq_list); 1059 fragq->nipq++; 1060 fp->ipq_nfrags = 1; 1061 fp->ipq_ttl = IPFRAGTTL; 1062 fp->ipq_p = ip->ip_p; 1063 fp->ipq_id = ip->ip_id; 1064 fp->ipq_src = ip->ip_src; 1065 fp->ipq_dst = ip->ip_dst; 1066 fp->ipq_frags = m; 1067 m->m_nextpkt = NULL; 1068 goto inserted; 1069 } 1070 fp->ipq_nfrags++; 1071 1072 #define GETIP(m) ((struct ip*)((m)->m_pkthdr.header)) 1073 1074 /* 1075 * Find a segment which begins after this one does. 1076 */ 1077 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { 1078 if (GETIP(q)->ip_off > ip->ip_off) 1079 break; 1080 } 1081 1082 /* 1083 * If there is a preceding segment, it may provide some of 1084 * our data already. If so, drop the data from the incoming 1085 * segment. If it provides all of our data, drop us, otherwise 1086 * stick new segment in the proper place. 1087 * 1088 * If some of the data is dropped from the the preceding 1089 * segment, then it's checksum is invalidated. 1090 */ 1091 if (p) { 1092 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off; 1093 if (i > 0) { 1094 if (i >= ip->ip_len) 1095 goto dropfrag; 1096 m_adj(m, i); 1097 m->m_pkthdr.csum_flags = 0; 1098 ip->ip_off += i; 1099 ip->ip_len -= i; 1100 } 1101 m->m_nextpkt = p->m_nextpkt; 1102 p->m_nextpkt = m; 1103 } else { 1104 m->m_nextpkt = fp->ipq_frags; 1105 fp->ipq_frags = m; 1106 } 1107 1108 /* 1109 * While we overlap succeeding segments trim them or, 1110 * if they are completely covered, dequeue them. 
1111 */ 1112 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off; 1113 q = nq) { 1114 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off; 1115 if (i < GETIP(q)->ip_len) { 1116 GETIP(q)->ip_len -= i; 1117 GETIP(q)->ip_off += i; 1118 m_adj(q, i); 1119 q->m_pkthdr.csum_flags = 0; 1120 break; 1121 } 1122 nq = q->m_nextpkt; 1123 m->m_nextpkt = nq; 1124 ipstat.ips_fragdropped++; 1125 fp->ipq_nfrags--; 1126 q->m_nextpkt = NULL; 1127 m_freem(q); 1128 } 1129 1130 inserted: 1131 /* 1132 * Check for complete reassembly and perform frag per packet 1133 * limiting. 1134 * 1135 * Frag limiting is performed here so that the nth frag has 1136 * a chance to complete the packet before we drop the packet. 1137 * As a result, n+1 frags are actually allowed per packet, but 1138 * only n will ever be stored. (n = maxfragsperpacket.) 1139 * 1140 */ 1141 next = 0; 1142 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { 1143 if (GETIP(q)->ip_off != next) { 1144 if (fp->ipq_nfrags > maxfragsperpacket) { 1145 ipstat.ips_fragdropped += fp->ipq_nfrags; 1146 ip_freef(fragq, head, fp); 1147 } 1148 goto done; 1149 } 1150 next += GETIP(q)->ip_len; 1151 } 1152 /* Make sure the last packet didn't have the IP_MF flag */ 1153 if (p->m_flags & M_FRAG) { 1154 if (fp->ipq_nfrags > maxfragsperpacket) { 1155 ipstat.ips_fragdropped += fp->ipq_nfrags; 1156 ip_freef(fragq, head, fp); 1157 } 1158 goto done; 1159 } 1160 1161 /* 1162 * Reassembly is complete. Make sure the packet is a sane size. 1163 */ 1164 q = fp->ipq_frags; 1165 ip = GETIP(q); 1166 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) { 1167 ipstat.ips_toolong++; 1168 ipstat.ips_fragdropped += fp->ipq_nfrags; 1169 ip_freef(fragq, head, fp); 1170 goto done; 1171 } 1172 1173 /* 1174 * Concatenate fragments. 
1175 */ 1176 m = q; 1177 n = m->m_next; 1178 m->m_next = NULL; 1179 m_cat(m, n); 1180 nq = q->m_nextpkt; 1181 q->m_nextpkt = NULL; 1182 for (q = nq; q != NULL; q = nq) { 1183 nq = q->m_nextpkt; 1184 q->m_nextpkt = NULL; 1185 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags; 1186 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data; 1187 m_cat(m, q); 1188 } 1189 1190 /* 1191 * Clean up the 1's complement checksum. Carry over 16 bits must 1192 * be added back. This assumes no more then 65535 packet fragments 1193 * were reassembled. A second carry can also occur (but not a third). 1194 */ 1195 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) + 1196 (m->m_pkthdr.csum_data >> 16); 1197 if (m->m_pkthdr.csum_data > 0xFFFF) 1198 m->m_pkthdr.csum_data -= 0xFFFF; 1199 1200 /* 1201 * Create header for new ip packet by 1202 * modifying header of first packet; 1203 * dequeue and discard fragment reassembly header. 1204 * Make header visible. 1205 */ 1206 ip->ip_len = next; 1207 ip->ip_src = fp->ipq_src; 1208 ip->ip_dst = fp->ipq_dst; 1209 TAILQ_REMOVE(head, fp, ipq_list); 1210 fragq->nipq--; 1211 mpipe_free(&ipq_mpipe, fp); 1212 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2); 1213 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2); 1214 /* some debugging cruft by sklower, below, will go away soon */ 1215 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ 1216 int plen = 0; 1217 1218 for (n = m; n; n = n->m_next) 1219 plen += n->m_len; 1220 m->m_pkthdr.len = plen; 1221 } 1222 1223 /* 1224 * Reassembly complete, return the next protocol. 1225 * 1226 * Be sure to clear M_HASH to force the packet 1227 * to be re-characterized. 1228 * 1229 * Clear M_FRAG, we are no longer a fragment. 
1230 */ 1231 m->m_flags &= ~(M_HASH | M_FRAG); 1232 1233 ipstat.ips_reassembled++; 1234 return (m); 1235 1236 dropfrag: 1237 ipstat.ips_fragdropped++; 1238 if (fp != NULL) 1239 fp->ipq_nfrags--; 1240 m_freem(m); 1241 done: 1242 return (NULL); 1243 1244 #undef GETIP 1245 } 1246 1247 /* 1248 * Free a fragment reassembly header and all 1249 * associated datagrams. 1250 */ 1251 static void 1252 ip_freef(struct ipfrag_queue *fragq, struct ipqhead *fhp, struct ipq *fp) 1253 { 1254 struct mbuf *q; 1255 1256 /* 1257 * Remove first to protect against blocking 1258 */ 1259 TAILQ_REMOVE(fhp, fp, ipq_list); 1260 1261 /* 1262 * Clean out at our leisure 1263 */ 1264 while (fp->ipq_frags) { 1265 q = fp->ipq_frags; 1266 fp->ipq_frags = q->m_nextpkt; 1267 q->m_nextpkt = NULL; 1268 m_freem(q); 1269 } 1270 mpipe_free(&ipq_mpipe, fp); 1271 fragq->nipq--; 1272 } 1273 1274 /* 1275 * If a timer expires on a reassembly queue, discard it. 1276 */ 1277 static void 1278 ipfrag_timeo_dispatch(netmsg_t nmsg) 1279 { 1280 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid]; 1281 struct ipq *fp, *fp_temp; 1282 struct ipqhead *head; 1283 int i; 1284 1285 crit_enter(); 1286 netisr_replymsg(&nmsg->base, 0); /* reply ASAP */ 1287 crit_exit(); 1288 1289 if (fragq->nipq == 0) 1290 goto done; 1291 1292 for (i = 0; i < IPREASS_NHASH; i++) { 1293 head = &fragq->ipq[i]; 1294 TAILQ_FOREACH_MUTABLE(fp, head, ipq_list, fp_temp) { 1295 if (--fp->ipq_ttl == 0) { 1296 ipstat.ips_fragtimeout += fp->ipq_nfrags; 1297 ip_freef(fragq, head, fp); 1298 } 1299 } 1300 } 1301 /* 1302 * If we are over the maximum number of fragments 1303 * (due to the limit being lowered), drain off 1304 * enough to get down to the new limit. 
1305 */ 1306 if (maxnipq >= 0 && fragq->nipq > maxnipq) { 1307 for (i = 0; i < IPREASS_NHASH; i++) { 1308 head = &fragq->ipq[i]; 1309 while (fragq->nipq > maxnipq && !TAILQ_EMPTY(head)) { 1310 ipstat.ips_fragdropped += 1311 TAILQ_FIRST(head)->ipq_nfrags; 1312 ip_freef(fragq, head, TAILQ_FIRST(head)); 1313 } 1314 } 1315 } 1316 done: 1317 callout_reset(&fragq->timeo_ch, IPFRAG_TIMEO, ipfrag_timeo, NULL); 1318 } 1319 1320 static void 1321 ipfrag_timeo(void *dummy __unused) 1322 { 1323 struct netmsg_base *msg = &ipfrag_queue_pcpu[mycpuid].timeo_netmsg; 1324 1325 crit_enter(); 1326 if (msg->lmsg.ms_flags & MSGF_DONE) 1327 netisr_sendmsg_oncpu(msg); 1328 crit_exit(); 1329 } 1330 1331 /* 1332 * Drain off all datagram fragments. 1333 */ 1334 static void 1335 ipfrag_drain_oncpu(struct ipfrag_queue *fragq) 1336 { 1337 struct ipqhead *head; 1338 int i; 1339 1340 for (i = 0; i < IPREASS_NHASH; i++) { 1341 head = &fragq->ipq[i]; 1342 while (!TAILQ_EMPTY(head)) { 1343 ipstat.ips_fragdropped += TAILQ_FIRST(head)->ipq_nfrags; 1344 ip_freef(fragq, head, TAILQ_FIRST(head)); 1345 } 1346 } 1347 } 1348 1349 static void 1350 ipfrag_drain_dispatch(netmsg_t nmsg) 1351 { 1352 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid]; 1353 1354 crit_enter(); 1355 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1356 crit_exit(); 1357 1358 ipfrag_drain_oncpu(fragq); 1359 fragq->draining = 0; 1360 } 1361 1362 static void 1363 ipfrag_drain_ipi(void *arg __unused) 1364 { 1365 int cpu = mycpuid; 1366 struct lwkt_msg *msg = &ipfrag_queue_pcpu[cpu].drain_netmsg.lmsg; 1367 1368 crit_enter(); 1369 if (msg->ms_flags & MSGF_DONE) 1370 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg); 1371 crit_exit(); 1372 } 1373 1374 static void 1375 ipfrag_drain(void) 1376 { 1377 cpumask_t mask; 1378 int cpu; 1379 1380 CPUMASK_ASSBMASK(mask, netisr_ncpus); 1381 CPUMASK_ANDMASK(mask, smp_active_mask); 1382 1383 if (IN_NETISR_NCPUS(mycpuid)) { 1384 ipfrag_drain_oncpu(&ipfrag_queue_pcpu[mycpuid]); 1385 CPUMASK_NANDBIT(mask, 
mycpuid); 1386 } 1387 1388 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 1389 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[cpu]; 1390 1391 if (!CPUMASK_TESTBIT(mask, cpu)) 1392 continue; 1393 1394 if (fragq->nipq == 0 || fragq->draining) { 1395 /* No fragments or is draining; skip this cpu. */ 1396 CPUMASK_NANDBIT(mask, cpu); 1397 continue; 1398 } 1399 fragq->draining = 1; 1400 } 1401 1402 if (CPUMASK_TESTNZERO(mask)) 1403 lwkt_send_ipiq_mask(mask, ipfrag_drain_ipi, NULL); 1404 } 1405 1406 void 1407 ip_drain(void) 1408 { 1409 ipfrag_drain(); 1410 in_rtqdrain(); 1411 } 1412 1413 /* 1414 * Do option processing on a datagram, 1415 * possibly discarding it if bad options are encountered, 1416 * or forwarding it if source-routed. 1417 * The pass argument is used when operating in the IPSTEALTH 1418 * mode to tell what options to process: 1419 * [LS]SRR (pass 0) or the others (pass 1). 1420 * The reason for as many as two passes is that when doing IPSTEALTH, 1421 * non-routing options should be processed only if the packet is for us. 1422 * Returns 1 if packet has been forwarded/freed, 1423 * 0 if the packet should be processed further. 
 */
static int
ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
{
	struct sockaddr_in ipaddr = { sizeof ipaddr, AF_INET };
	struct ip *ip = mtod(m, struct ip *);
	u_char *cp;
	struct in_ifaddr *ia;
	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB;
	boolean_t forward = FALSE;
	struct in_addr *sin, dst;
	n_time ntime;

	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	/* cnt = total option bytes following the fixed IP header */
	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			/* Validate the option length byte before trusting it. */
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}
		switch (opt) {

		default:
			break;

		/*
		 * Source routing with record.
		 * Find interface with current destination address.
		 * If none on this machine then drop if strictly routed,
		 * or do nothing if loosely routed.
		 * Record interface address and bring up next address
		 * component.  If strictly routed make sure next
		 * address is on directly accessible net.
		 */
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (ipstealth && pass > 0)
				break;
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			ipaddr.sin_addr = ip->ip_dst;
			ia = (struct in_ifaddr *)
			    ifa_ifwithaddr((struct sockaddr *)&ipaddr);
			if (ia == NULL) {
				if (opt == IPOPT_SSRR) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				}
				if (!ip_dosourceroute)
					goto nosourcerouting;
				/*
				 * Loose routing, and not at next destination
				 * yet; nothing to do except forward.
				 */
				break;
			}
			off--;			/* 0 origin */
			if (off > optlen - (int)sizeof(struct in_addr)) {
				/*
				 * End of source route.  Should be for us.
				 */
				if (!ip_acceptsourceroute)
					goto nosourcerouting;
				save_rte(m, cp, ip->ip_src);
				break;
			}
			if (ipstealth)
				goto dropit;
			if (!ip_dosourceroute) {
				if (ipforwarding) {
					char sbuf[INET_ADDRSTRLEN];
					char dbuf[INET_ADDRSTRLEN];

					/*
					 * Acting as a router, so generate ICMP
					 */
nosourcerouting:
					log(LOG_WARNING,
					    "attempted source route from %s to %s\n",
					    kinet_ntoa(ip->ip_src, sbuf),
					    kinet_ntoa(ip->ip_dst, dbuf));
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				} else {
					/*
					 * Not acting as a router,
					 * so silently drop.
					 */
dropit:
					ipstat.ips_cantforward++;
					m_freem(m);
					return (1);
				}
			}

			/*
			 * locate outgoing interface
			 */
			memcpy(&ipaddr.sin_addr, cp + off,
			    sizeof ipaddr.sin_addr);

			if (opt == IPOPT_SSRR) {
#define INA	struct in_ifaddr *
#define SA	struct sockaddr *
				if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr))
				    == NULL)
					ia = (INA)ifa_ifwithnet((SA)&ipaddr);
			} else {
				ia = ip_rtaddr(ipaddr.sin_addr, NULL);
			}
			if (ia == NULL) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				goto bad;
			}
			ip->ip_dst = ipaddr.sin_addr;
			/* Record our address and advance the route pointer. */
			memcpy(cp + off, &IA_SIN(ia)->sin_addr,
			    sizeof(struct in_addr));
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			/*
			 * Let ip_intr's mcast routing check handle mcast pkts
			 */
			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
			break;

		case IPOPT_RR:
			if (ipstealth && pass == 0)
				break;
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			/*
			 * If no space remains, ignore.
			 */
			off--;			/* 0 origin */
			if (off > optlen - (int)sizeof(struct in_addr))
				break;
			memcpy(&ipaddr.sin_addr, &ip->ip_dst,
			    sizeof ipaddr.sin_addr);
			/*
			 * locate outgoing interface; if we're the destination,
			 * use the incoming interface (should be same).
			 */
			if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL &&
			    (ia = ip_rtaddr(ipaddr.sin_addr, NULL)) == NULL) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_HOST;
				goto bad;
			}
			memcpy(cp + off, &IA_SIN(ia)->sin_addr,
			    sizeof(struct in_addr));
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			break;

		case IPOPT_TS:
			if (ipstealth && pass == 0)
				break;
			code = cp - (u_char *)ip;
			if (optlen < 4 || optlen > 40) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < 5) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if (off > optlen - (int)sizeof(int32_t)) {
				/* Timestamp area full: bump overflow counter. */
				cp[IPOPT_OFFSET + 1] += (1 << 4);
				if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) {
					code = &cp[IPOPT_OFFSET] - (u_char *)ip;
					goto bad;
				}
				break;
			}
			off--;			/* 0 origin */
			sin = (struct in_addr *)(cp + off);
			switch (cp[IPOPT_OFFSET + 1] & 0x0f) {

			case IPOPT_TS_TSONLY:
				break;

			case IPOPT_TS_TSANDADDR:
				if (off + sizeof(n_time) +
				    sizeof(struct in_addr) > optlen) {
					code = &cp[IPOPT_OFFSET] - (u_char *)ip;
					goto bad;
				}
				ipaddr.sin_addr = dst;
				ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
				    m->m_pkthdr.rcvif);
				if (ia == NULL)
					continue;
				memcpy(sin, &IA_SIN(ia)->sin_addr,
				    sizeof(struct in_addr));
				cp[IPOPT_OFFSET] += sizeof(struct in_addr);
				off += sizeof(struct in_addr);
				break;

			case IPOPT_TS_PRESPEC:
				if (off + sizeof(n_time) +
				    sizeof(struct in_addr) > optlen) {
					code = &cp[IPOPT_OFFSET] - (u_char *)ip;
					goto bad;
				}
				memcpy(&ipaddr.sin_addr, sin,
				    sizeof(struct in_addr));
				/* Only stamp if the prespecified addr is ours. */
				if (ifa_ifwithaddr((SA)&ipaddr) == NULL)
					continue;
				cp[IPOPT_OFFSET] += sizeof(struct in_addr);
				off += sizeof(struct in_addr);
				break;

			default:
				code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip;
				goto bad;
			}

			ntime = iptime();
			memcpy(cp + off, &ntime, sizeof(n_time));
			cp[IPOPT_OFFSET] += sizeof(n_time);
		}
	}
	if (forward && ipforwarding) {
		ip_forward(m, TRUE, next_hop);
		return (1);
	}
	return (0);
bad:
	icmp_error(m, type, code, 0, 0);
	ipstat.ips_badoptions++;
	return (1);
}

/*
 * Given address of next destination (final or next hop),
 * return internet address info of interface to be used to get there.
 *
 * If ro0 is non-NULL the caller's route cache is (re)used and the
 * route reference is left for the caller to release; otherwise a
 * temporary route is allocated and freed here before returning.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, struct route *ro0)
{
	struct route sro, *ro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	if (ro0 != NULL) {
		ro = ro0;
	} else {
		bzero(&sro, sizeof(sro));
		ro = &sro;
	}

	sin = (struct sockaddr_in *)&ro->ro_dst;

	/* (Re)resolve when there is no cached route or the dst changed. */
	if (ro->ro_rt == NULL || dst.s_addr != sin->sin_addr.s_addr) {
		if (ro->ro_rt != NULL) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof *sin;
		sin->sin_addr = dst;
		rtalloc_ign(ro, RTF_PRCLONING);
	}

	if (ro->ro_rt == NULL)
		return (NULL);

	ia = ifatoia(ro->ro_rt->rt_ifa);

	if (ro == &sro)
		RTFREE(ro->ro_rt);
	return ia;
}

/*
 * Save incoming source route for use in replies,
 * to be picked up later by ip_srcroute if the receiver is interested.
 */
static void
save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
{
	struct m_tag *mtag;
	struct ip_srcrt_opt *opt;
	unsigned olen;

	mtag = m_tag_get(PACKET_TAG_IPSRCRT, sizeof(*opt), M_NOWAIT);
	if (mtag == NULL)
		return;
	opt = m_tag_data(mtag);

	olen = option[IPOPT_OLEN];
#ifdef DIAGNOSTIC
	if (ipprintfs)
		kprintf("save_rte: olen %d\n", olen);
#endif
	/* Reject routes too long to fit the tag's srcopt buffer. */
	if (olen > sizeof(opt->ip_srcrt) - (1 + sizeof(dst))) {
		m_tag_free(mtag);
		return;
	}
	bcopy(option, opt->ip_srcrt.srcopt, olen);
	opt->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
	opt->ip_srcrt.dst = dst;
	m_tag_prepend(m, mtag);
}

/*
 * Retrieve incoming source route for use in replies,
 * in the same form used by setsockopt.
 * The first hop is placed before the options, will be removed later.
 *
 * Returns a freshly allocated mbuf holding the reversed route, or NULL
 * if no source-route tag is present or allocation fails.  The tag is
 * consumed (deleted from m0) on success.
 */
struct mbuf *
ip_srcroute(struct mbuf *m0)
{
	struct in_addr *p, *q;
	struct mbuf *m;
	struct m_tag *mtag;
	struct ip_srcrt_opt *opt;

	if (m0 == NULL)
		return NULL;

	mtag = m_tag_find(m0, PACKET_TAG_IPSRCRT, NULL);
	if (mtag == NULL)
		return NULL;
	opt = m_tag_data(mtag);

	if (opt->ip_nhops == 0)
		return (NULL);
	m = m_get(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		return (NULL);

#define OPTSIZ	(sizeof(opt->ip_srcrt.nop) + sizeof(opt->ip_srcrt.srcopt))

	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
	m->m_len = opt->ip_nhops * sizeof(struct in_addr) +
	    sizeof(struct in_addr) + OPTSIZ;
#ifdef DIAGNOSTIC
	if (ipprintfs) {
		kprintf("ip_srcroute: nhops %d mlen %d",
		    opt->ip_nhops, m->m_len);
	}
#endif

	/*
	 * First save first hop for return route
	 */
	p = &opt->ip_srcrt.route[opt->ip_nhops - 1];
	*(mtod(m, struct in_addr *)) = *p--;
#ifdef DIAGNOSTIC
	if (ipprintfs)
		kprintf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr));
#endif

	/*
	 * Copy option fields and padding (nop) to mbuf.
	 */
	opt->ip_srcrt.nop = IPOPT_NOP;
	opt->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
	memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &opt->ip_srcrt.nop,
	    OPTSIZ);
	q = (struct in_addr *)(mtod(m, caddr_t) +
	    sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
	/*
	 * Record return path as an IP source route,
	 * reversing the path (pointers are now aligned).
	 */
	while (p >= opt->ip_srcrt.route) {
#ifdef DIAGNOSTIC
		if (ipprintfs)
			kprintf(" %x", ntohl(q->s_addr));
#endif
		*q++ = *p--;
	}
	/*
	 * Last hop goes to final destination.
	 */
	*q = opt->ip_srcrt.dst;
	m_tag_delete(m0, mtag);
#ifdef DIAGNOSTIC
	if (ipprintfs)
		kprintf(" %x\n", ntohl(q->s_addr));
#endif
	return (m);
}

/*
 * Strip out IP options.
 * Shifts the payload down over the options in the first mbuf and
 * rewrites ip_vhl for a plain 20-byte header.  Assumes the full
 * header plus options reside in the first mbuf (m_len covers them).
 */
void
ip_stripoptions(struct mbuf *m)
{
	int datalen;
	struct ip *ip = mtod(m, struct ip *);
	caddr_t opts;
	int optlen;

	optlen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
	opts = (caddr_t)(ip + 1);
	datalen = m->m_len - (sizeof(struct ip) + optlen);
	bcopy(opts + optlen, opts, datalen);
	m->m_len -= optlen;
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= optlen;
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
}

/*
 * Map PRC_* protocol-control codes to errno values for inet sockets.
 */
u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		0,		0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.
 * Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The using_srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, boolean_t using_srcrt, struct sockaddr_in *next_hop)
{
	struct ip *ip = mtod(m, struct ip *);
	struct rtentry *rt;
	struct route fwd_ro;
	int error, type = 0, code = 0, destmtu = 0;
	struct mbuf *mcopy, *mtemp = NULL;
	n_long dest;
	struct in_addr pkt_dst;

	dest = INADDR_ANY;
	/*
	 * Cache the destination address of the packet; this may be
	 * changed by use of 'ipfw fwd'.
	 */
	pkt_dst = (next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst;

#ifdef DIAGNOSTIC
	if (ipprintfs)
		kprintf("forward: src %x dst %x ttl %x\n",
		    ip->ip_src.s_addr, pkt_dst.s_addr, ip->ip_ttl);
#endif

	if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
		ipstat.ips_cantforward++;
		m_freem(m);
		return;
	}
	if (!ipstealth && ip->ip_ttl <= IPTTLDEC) {
		/* TTL would expire here; tell the sender. */
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
		return;
	}

	bzero(&fwd_ro, sizeof(fwd_ro));
	ip_rtaddr(pkt_dst, &fwd_ro);
	if (fwd_ro.ro_rt == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
		return;
	}
	rt = fwd_ro.ro_rt;

	if (curthread->td_type == TD_TYPE_NETISR) {
		/*
		 * Save the IP header and at most 8 bytes of the payload,
		 * in case we need to generate an ICMP message to the src.
		 */
		mtemp = ipforward_mtemp[mycpuid];
		KASSERT((mtemp->m_flags & M_EXT) == 0 &&
		    mtemp->m_data == mtemp->m_pktdat &&
		    m_tag_first(mtemp) == NULL,
		    ("ip_forward invalid mtemp1"));

		if (!m_dup_pkthdr(mtemp, m, M_NOWAIT)) {
			/*
			 * It's probably ok if the pkthdr dup fails (because
			 * the deep copy of the tag chain failed), but for now
			 * be conservative and just discard the copy since
			 * code below may some day want the tags.
			 */
			mtemp = NULL;
		} else {
			mtemp->m_type = m->m_type;
			mtemp->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
			    (int)ip->ip_len);
			mtemp->m_pkthdr.len = mtemp->m_len;
			m_copydata(m, 0, mtemp->m_len, mtod(mtemp, caddr_t));
		}
	}

	if (!ipstealth)
		ip->ip_ttl -= IPTTLDEC;

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	if (rt->rt_ifp == m->m_pkthdr.rcvif &&
	    !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
	    ipsendredirects && !using_srcrt && next_hop == NULL) {
		u_long src = ntohl(ip->ip_src.s_addr);
		struct in_ifaddr *rt_ifa = (struct in_ifaddr *)rt->rt_ifa;

		if (rt_ifa != NULL &&
		    (src & rt_ifa->ia_subnetmask) == rt_ifa->ia_subnet) {
			if (rt->rt_flags & RTF_GATEWAY)
				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
			else
				dest = pkt_dst.s_addr;
			/*
			 * Router requirements says to only send
			 * host redirects.
			 */
			type = ICMP_REDIRECT;
			code = ICMP_REDIRECT_HOST;
#ifdef DIAGNOSTIC
			if (ipprintfs)
				kprintf("redirect (%d) to %x\n", code, dest);
#endif
		}
	}

	error = ip_output(m, NULL, &fwd_ro, IP_FORWARDING, NULL, NULL);
	if (error == 0) {
		ipstat.ips_forward++;
		if (type == 0) {
			/* Forwarded cleanly; seed the flow cache and finish. */
			if (mtemp)
				ipflow_create(&fwd_ro, mtemp);
			goto done;
		}
		ipstat.ips_redirectsent++;
	} else {
		ipstat.ips_cantforward++;
	}

	if (mtemp == NULL)
		goto done;

	/*
	 * Errors that do not require generating ICMP message
	 */
	switch (error) {
	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (!ip_sendsourcequench)
			goto done;
		break;

	case EACCES:			/* ipfw denied packet */
		goto done;
	}

	KASSERT((mtemp->m_flags & M_EXT) == 0 &&
	    mtemp->m_data == mtemp->m_pktdat,
	    ("ip_forward invalid mtemp2"));
	mcopy = m_copym(mtemp, 0, mtemp->m_len, M_NOWAIT);
	if (mcopy == NULL)
		goto done;

	/*
	 * Send ICMP message.
	 */
	switch (error) {
	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;
		if (fwd_ro.ro_rt != NULL)
			destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
		ipstat.ips_cantfrag++;
		break;

	case ENOBUFS:
		type = ICMP_SOURCEQUENCH;
		code = 0;
		break;

	case EACCES:			/* ipfw denied packet */
		panic("ip_forward EACCES should not reach");
	}
	icmp_error(mcopy, type, code, dest, destmtu);
done:
	if (mtemp != NULL)
		m_tag_delete_chain(mtemp);
	if (fwd_ro.ro_rt != NULL)
		RTFREE(fwd_ro.ro_rt);
}

/*
 * Append the control-message mbufs requested by the socket's options
 * (SO_TIMESTAMP, IP_RECVDSTADDR, IP_RECVTTL, IP_RECVTOS, IP_RECVIF)
 * to *mp for delivery with the datagram.
 */
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		microtime(&tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    ((ifp->if_index != 0) && (ifp->if_index <= if_index))) {
			sdp = IF_LLSOCKADDR(ifp);
			/*
			 * Change our mind and don't try copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			/* No usable receive interface; emit an empty AF_LINK. */
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXX these routines are called from the upper part of the kernel.
 *
 * They could also be moved to ip_mroute.c, since all the RSVP
 * handling is done there already.
2163 */ 2164 int 2165 ip_rsvp_init(struct socket *so) 2166 { 2167 if (so->so_type != SOCK_RAW || 2168 so->so_proto->pr_protocol != IPPROTO_RSVP) 2169 return EOPNOTSUPP; 2170 2171 if (ip_rsvpd != NULL) 2172 return EADDRINUSE; 2173 2174 ip_rsvpd = so; 2175 /* 2176 * This may seem silly, but we need to be sure we don't over-increment 2177 * the RSVP counter, in case something slips up. 2178 */ 2179 if (!ip_rsvp_on) { 2180 ip_rsvp_on = 1; 2181 rsvp_on++; 2182 } 2183 2184 return 0; 2185 } 2186 2187 int 2188 ip_rsvp_done(void) 2189 { 2190 ip_rsvpd = NULL; 2191 /* 2192 * This may seem silly, but we need to be sure we don't over-decrement 2193 * the RSVP counter, in case something slips up. 2194 */ 2195 if (ip_rsvp_on) { 2196 ip_rsvp_on = 0; 2197 rsvp_on--; 2198 } 2199 return 0; 2200 } 2201 2202 int 2203 rsvp_input(struct mbuf **mp, int *offp, int proto) 2204 { 2205 struct mbuf *m = *mp; 2206 2207 *mp = NULL; 2208 2209 if (rsvp_input_p) { /* call the real one if loaded */ 2210 *mp = m; 2211 rsvp_input_p(mp, offp, proto); 2212 return(IPPROTO_DONE); 2213 } 2214 2215 /* Can still get packets with rsvp_on = 0 if there is a local member 2216 * of the group to which the RSVP packet is addressed. But in this 2217 * case we want to throw the packet away. 2218 */ 2219 2220 if (!rsvp_on) { 2221 m_freem(m); 2222 return(IPPROTO_DONE); 2223 } 2224 2225 if (ip_rsvpd != NULL) { 2226 *mp = m; 2227 rip_input(mp, offp, proto); 2228 return(IPPROTO_DONE); 2229 } 2230 /* Drop the packet */ 2231 m_freem(m); 2232 return(IPPROTO_DONE); 2233 } 2234