1 /* $OpenBSD: ip_input.c,v 1.394 2024/05/08 13:01:30 bluhm Exp $ */ 2 /* $NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1988, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 33 */ 34 35 #include "pf.h" 36 #include "carp.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/mbuf.h> 41 #include <sys/domain.h> 42 #include <sys/mutex.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/sysctl.h> 47 #include <sys/pool.h> 48 #include <sys/task.h> 49 50 #include <net/if.h> 51 #include <net/if_var.h> 52 #include <net/if_dl.h> 53 #include <net/route.h> 54 #include <net/netisr.h> 55 56 #include <netinet/in.h> 57 #include <netinet/in_systm.h> 58 #include <netinet/if_ether.h> 59 #include <netinet/ip.h> 60 #include <netinet/in_pcb.h> 61 #include <netinet/in_var.h> 62 #include <netinet/ip_var.h> 63 #include <netinet/ip_icmp.h> 64 #include <net/if_types.h> 65 66 #ifdef INET6 67 #include <netinet6/ip6_var.h> 68 #endif 69 70 #if NPF > 0 71 #include <net/pfvar.h> 72 #endif 73 74 #ifdef MROUTING 75 #include <netinet/ip_mroute.h> 76 #endif 77 78 #ifdef IPSEC 79 #include <netinet/ip_ipsp.h> 80 #endif /* IPSEC */ 81 82 #if NCARP > 0 83 #include <netinet/ip_carp.h> 84 #endif 85 86 /* values controllable via sysctl */ 87 int ipforwarding = 0; 88 int ipmforwarding = 0; 89 int ipmultipath = 0; 90 int ipsendredirects = 1; 91 int ip_dosourceroute = 0; 92 int ip_defttl = IPDEFTTL; 93 int ip_mtudisc = 1; 94 int ip_mtudisc_timeout = IPMTUDISCTIMEOUT; 95 int ip_directedbcast = 0; 96 97 /* Protects `ipq' and `ip_frags'. 
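 * It is taken in ip_fragcheck() and ip_slowtimo(); ip_reass(), ip_freef()
 * and ip_flush() assert that it is held.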
*/ 98 struct mutex ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET); 99 100 /* IP reassembly queue */ 101 LIST_HEAD(, ipq) ipq; 102 103 /* Keep track of memory used for reassembly */ 104 int ip_maxqueue = 300; 105 int ip_frags = 0; 106 107 const struct sysctl_bounded_args ipctl_vars[] = { 108 #ifdef MROUTING 109 { IPCTL_MRTPROTO, &ip_mrtproto, SYSCTL_INT_READONLY }, 110 #endif 111 { IPCTL_FORWARDING, &ipforwarding, 0, 2 }, 112 { IPCTL_SENDREDIRECTS, &ipsendredirects, 0, 1 }, 113 { IPCTL_DEFTTL, &ip_defttl, 0, 255 }, 114 { IPCTL_DIRECTEDBCAST, &ip_directedbcast, 0, 1 }, 115 { IPCTL_IPPORT_FIRSTAUTO, &ipport_firstauto, 0, 65535 }, 116 { IPCTL_IPPORT_LASTAUTO, &ipport_lastauto, 0, 65535 }, 117 { IPCTL_IPPORT_HIFIRSTAUTO, &ipport_hifirstauto, 0, 65535 }, 118 { IPCTL_IPPORT_HILASTAUTO, &ipport_hilastauto, 0, 65535 }, 119 { IPCTL_IPPORT_MAXQUEUE, &ip_maxqueue, 0, 10000 }, 120 { IPCTL_MFORWARDING, &ipmforwarding, 0, 1 }, 121 { IPCTL_ARPTIMEOUT, &arpt_keep, 0, INT_MAX }, 122 { IPCTL_ARPDOWN, &arpt_down, 0, INT_MAX }, 123 }; 124 125 struct niqueue ipintrq = NIQUEUE_INITIALIZER(IPQ_MAXLEN, NETISR_IP); 126 127 struct pool ipqent_pool; 128 struct pool ipq_pool; 129 130 struct cpumem *ipcounters; 131 132 int ip_sysctl_ipstat(void *, size_t *, void *); 133 134 static struct mbuf_queue ipsend_mq; 135 static struct mbuf_queue ipsendraw_mq; 136 137 extern struct niqueue arpinq; 138 139 int ip_ours(struct mbuf **, int *, int, int); 140 int ip_dooptions(struct mbuf *, struct ifnet *); 141 int in_ouraddr(struct mbuf *, struct ifnet *, struct route *); 142 143 int ip_fragcheck(struct mbuf **, int *); 144 struct mbuf * ip_reass(struct ipqent *, struct ipq *); 145 void ip_freef(struct ipq *); 146 void ip_flush(void); 147 148 static void ip_send_dispatch(void *); 149 static void ip_sendraw_dispatch(void *); 150 static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq); 151 static struct task ipsendraw_task = 152 TASK_INITIALIZER(ip_sendraw_dispatch, &ipsendraw_mq); 153 154 /* 155 * Used to save the IP options in case a protocol wants to respond 156 * to an incoming packet over the same route if the packet got here 157 * using IP source routing. This allows connection establishment and 158 * maintenance when the remote end is on a network that is not known 159 * to us. 160 */ 161 struct ip_srcrt { 162 int isr_nhops; /* number of hops */ 163 struct in_addr isr_dst; /* final destination */ 164 char isr_nop; /* one NOP to align */ 165 char isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */ 166 struct in_addr isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)]; 167 }; 168 169 void save_rte(struct mbuf *, u_char *, struct in_addr); 170 171 /* 172 * IP initialization: fill in IP protocol switch table. 173 * All protocols not implemented in kernel go to raw IP protocol handler. 
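 * ip_protox[] maps an IP protocol number to an index into inetsw[]; every
 * slot is first pointed at the raw handler and then overridden for
 * protocols that have their own pr_input (e.g. the IPPROTO_TCP slot ends
 * up at the TCP input routine).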
 */
void
ip_init(void)
{
	const struct protosw *pr;
	int i;
	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;

	ipcounters = counters_alloc(ips_ncounters);

	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
	    IPL_SOFTNET, 0, "ipqe", NULL);
	pool_init(&ipq_pool, sizeof(struct ipq), 0,
	    IPL_SOFTNET, 0, "ipq", NULL);

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
		    pr->pr_protocol < IPPROTO_MAX)
			ip_protox[pr->pr_protocol] = pr - inetsw;
	LIST_INIT(&ipq);

	/* Fill in list of ports not to allocate dynamically. */
	memset(&baddynamicports, 0, sizeof(baddynamicports));
	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);

	/* Fill in list of ports only root can bind to. */
	memset(&rootonlyports, 0, sizeof(rootonlyports));
	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
	for (i = 0; defrootonlyports_udp[i] != 0; i++)
		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);

	mq_init(&ipsend_mq, 64, IPL_SOFTNET);
	mq_init(&ipsendraw_mq, 64, IPL_SOFTNET);

	arpinit();
#ifdef IPSEC
	ipsec_init();
#endif
#ifdef MROUTING
	rt_timer_queue_init(&ip_mrouterq, MCAST_EXPIRE_FREQUENCY,
	    &mfc_expire_route);
#endif
}

/*
 * Enqueue packet for local delivery. Queuing is used as a boundary
 * between the network layer (input/forward path) running with
 * NET_LOCK_SHARED() and the transport layer needing it exclusively.
 */
int
ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
{
	nxt = ip_fragcheck(mp, offp);
	if (nxt == IPPROTO_DONE)
		return IPPROTO_DONE;

	/* We are already in an IPv4/IPv6 local delivery loop. */
	if (af != AF_UNSPEC)
		return nxt;

	nxt = ip_deliver(mp, offp, nxt, AF_INET, 1);
	if (nxt == IPPROTO_DONE)
		return IPPROTO_DONE;

	/* save values for later, use after dequeue */
	if (*offp != sizeof(struct ip)) {
		struct m_tag *mtag;
		struct ipoffnxt *ion;

		/* mbuf tags are expensive, but only used for header options */
		mtag = m_tag_get(PACKET_TAG_IP_OFFNXT, sizeof(*ion),
		    M_NOWAIT);
		if (mtag == NULL) {
			ipstat_inc(ips_idropped);
			m_freemp(mp);
			return IPPROTO_DONE;
		}
		ion = (struct ipoffnxt *)(mtag + 1);
		ion->ion_off = *offp;
		ion->ion_nxt = nxt;

		m_tag_prepend(*mp, mtag);
	}

	niq_enqueue(&ipintrq, *mp);
	*mp = NULL;
	return IPPROTO_DONE;
}

/*
 * Dequeue and process locally delivered packets.
 * This is called with exclusive NET_LOCK().
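 * The offset and next-protocol values computed before enqueue are
 * recovered from the PACKET_TAG_IP_OFFNXT tag when present; otherwise
 * they are taken straight from the IP header.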
280 */ 281 void 282 ipintr(void) 283 { 284 struct mbuf *m; 285 286 while ((m = niq_dequeue(&ipintrq)) != NULL) { 287 struct m_tag *mtag; 288 int off, nxt; 289 290 #ifdef DIAGNOSTIC 291 if ((m->m_flags & M_PKTHDR) == 0) 292 panic("ipintr no HDR"); 293 #endif 294 mtag = m_tag_find(m, PACKET_TAG_IP_OFFNXT, NULL); 295 if (mtag != NULL) { 296 struct ipoffnxt *ion; 297 298 ion = (struct ipoffnxt *)(mtag + 1); 299 off = ion->ion_off; 300 nxt = ion->ion_nxt; 301 302 m_tag_delete(m, mtag); 303 } else { 304 struct ip *ip; 305 306 ip = mtod(m, struct ip *); 307 off = ip->ip_hl << 2; 308 nxt = ip->ip_p; 309 } 310 311 nxt = ip_deliver(&m, &off, nxt, AF_INET, 0); 312 KASSERT(nxt == IPPROTO_DONE); 313 } 314 } 315 316 /* 317 * IPv4 input routine. 318 * 319 * Checksum and byte swap header. Process options. Forward or deliver. 320 */ 321 void 322 ipv4_input(struct ifnet *ifp, struct mbuf *m) 323 { 324 int off, nxt; 325 326 off = 0; 327 nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp); 328 KASSERT(nxt == IPPROTO_DONE); 329 } 330 331 struct mbuf * 332 ipv4_check(struct ifnet *ifp, struct mbuf *m) 333 { 334 struct ip *ip; 335 int hlen, len; 336 337 if (m->m_len < sizeof(*ip)) { 338 m = m_pullup(m, sizeof(*ip)); 339 if (m == NULL) { 340 ipstat_inc(ips_toosmall); 341 return (NULL); 342 } 343 } 344 345 ip = mtod(m, struct ip *); 346 if (ip->ip_v != IPVERSION) { 347 ipstat_inc(ips_badvers); 348 goto bad; 349 } 350 351 hlen = ip->ip_hl << 2; 352 if (hlen < sizeof(*ip)) { /* minimum header length */ 353 ipstat_inc(ips_badhlen); 354 goto bad; 355 } 356 if (hlen > m->m_len) { 357 m = m_pullup(m, hlen); 358 if (m == NULL) { 359 ipstat_inc(ips_badhlen); 360 return (NULL); 361 } 362 ip = mtod(m, struct ip *); 363 } 364 365 /* 127/8 must not appear on wire - RFC1122 */ 366 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || 367 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { 368 if ((ifp->if_flags & IFF_LOOPBACK) == 0) { 369 ipstat_inc(ips_badaddr); 370 goto bad; 371 } 372 } 373 374 if (!ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK)) { 375 if (ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_BAD)) { 376 ipstat_inc(ips_badsum); 377 goto bad; 378 } 379 380 ipstat_inc(ips_inswcsum); 381 if (in_cksum(m, hlen) != 0) { 382 ipstat_inc(ips_badsum); 383 goto bad; 384 } 385 386 SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK); 387 } 388 389 /* Retrieve the packet length. */ 390 len = ntohs(ip->ip_len); 391 392 /* 393 * Convert fields to host representation. 394 */ 395 if (len < hlen) { 396 ipstat_inc(ips_badlen); 397 goto bad; 398 } 399 400 /* 401 * Check that the amount of data in the buffers 402 * is at least as much as the IP header would have us expect. 403 * Trim mbufs if longer than we expect. 404 * Drop packet if shorter than we expect. 
405 */ 406 if (m->m_pkthdr.len < len) { 407 ipstat_inc(ips_tooshort); 408 goto bad; 409 } 410 if (m->m_pkthdr.len > len) { 411 if (m->m_len == m->m_pkthdr.len) { 412 m->m_len = len; 413 m->m_pkthdr.len = len; 414 } else 415 m_adj(m, len - m->m_pkthdr.len); 416 } 417 418 return (m); 419 bad: 420 m_freem(m); 421 return (NULL); 422 } 423 424 int 425 ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp) 426 { 427 struct route ro; 428 struct mbuf *m; 429 struct ip *ip; 430 int hlen; 431 #if NPF > 0 432 struct in_addr odst; 433 #endif 434 int pfrdr = 0; 435 436 KASSERT(*offp == 0); 437 438 ro.ro_rt = NULL; 439 ipstat_inc(ips_total); 440 m = *mp = ipv4_check(ifp, *mp); 441 if (m == NULL) 442 goto bad; 443 444 ip = mtod(m, struct ip *); 445 446 #if NCARP > 0 447 if (carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr, 448 &ip->ip_dst.s_addr, (ip->ip_p == IPPROTO_ICMP ? 0 : 1))) 449 goto bad; 450 #endif 451 452 #if NPF > 0 453 /* 454 * Packet filter 455 */ 456 odst = ip->ip_dst; 457 if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS) 458 goto bad; 459 m = *mp; 460 if (m == NULL) 461 goto bad; 462 463 ip = mtod(m, struct ip *); 464 pfrdr = odst.s_addr != ip->ip_dst.s_addr; 465 #endif 466 467 hlen = ip->ip_hl << 2; 468 469 /* 470 * Process options and, if not destined for us, 471 * ship it on. ip_dooptions returns 1 when an 472 * error was detected (causing an icmp message 473 * to be sent and the original packet to be freed). 474 */ 475 if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) { 476 m = *mp = NULL; 477 goto bad; 478 } 479 480 if (ip->ip_dst.s_addr == INADDR_BROADCAST || 481 ip->ip_dst.s_addr == INADDR_ANY) { 482 nxt = ip_ours(mp, offp, nxt, af); 483 goto out; 484 } 485 486 switch(in_ouraddr(m, ifp, &ro)) { 487 case 2: 488 goto bad; 489 case 1: 490 nxt = ip_ours(mp, offp, nxt, af); 491 goto out; 492 } 493 494 if (IN_MULTICAST(ip->ip_dst.s_addr)) { 495 /* 496 * Make sure M_MCAST is set. It should theoretically 497 * already be there, but let's play safe because upper 498 * layers check for this flag. 499 */ 500 m->m_flags |= M_MCAST; 501 502 #ifdef MROUTING 503 if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) { 504 int error; 505 506 if (m->m_flags & M_EXT) { 507 if ((m = *mp = m_pullup(m, hlen)) == NULL) { 508 ipstat_inc(ips_toosmall); 509 goto bad; 510 } 511 ip = mtod(m, struct ip *); 512 } 513 /* 514 * If we are acting as a multicast router, all 515 * incoming multicast packets are passed to the 516 * kernel-level multicast forwarding function. 517 * The packet is returned (relatively) intact; if 518 * ip_mforward() returns a non-zero value, the packet 519 * must be discarded, else it may be accepted below. 520 * 521 * (The IP ident field is put in the same byte order 522 * as expected when ip_mforward() is called from 523 * ip_output().) 524 */ 525 KERNEL_LOCK(); 526 error = ip_mforward(m, ifp); 527 KERNEL_UNLOCK(); 528 if (error) { 529 ipstat_inc(ips_cantforward); 530 goto bad; 531 } 532 533 /* 534 * The process-level routing daemon needs to receive 535 * all multicast IGMP packets, whether or not this 536 * host belongs to their destination groups. 537 */ 538 if (ip->ip_p == IPPROTO_IGMP) { 539 nxt = ip_ours(mp, offp, nxt, af); 540 goto out; 541 } 542 ipstat_inc(ips_forward); 543 } 544 #endif 545 /* 546 * See if we belong to the destination multicast group on the 547 * arrival interface. 
548 */ 549 if (!in_hasmulti(&ip->ip_dst, ifp)) { 550 ipstat_inc(ips_notmember); 551 if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr)) 552 ipstat_inc(ips_cantforward); 553 goto bad; 554 } 555 nxt = ip_ours(mp, offp, nxt, af); 556 goto out; 557 } 558 559 #if NCARP > 0 560 if (ip->ip_p == IPPROTO_ICMP && 561 carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr, 562 &ip->ip_dst.s_addr, 1)) 563 goto bad; 564 #endif 565 /* 566 * Not for us; forward if possible and desirable. 567 */ 568 if (ipforwarding == 0) { 569 ipstat_inc(ips_cantforward); 570 goto bad; 571 } 572 #ifdef IPSEC 573 if (ipsec_in_use) { 574 int rv; 575 576 rv = ipsec_forward_check(m, hlen, AF_INET); 577 if (rv != 0) { 578 ipstat_inc(ips_cantforward); 579 goto bad; 580 } 581 /* 582 * Fall through, forward packet. Outbound IPsec policy 583 * checking will occur in ip_output(). 584 */ 585 } 586 #endif /* IPSEC */ 587 588 ip_forward(m, ifp, &ro, pfrdr); 589 *mp = NULL; 590 rtfree(ro.ro_rt); 591 return IPPROTO_DONE; 592 bad: 593 nxt = IPPROTO_DONE; 594 m_freemp(mp); 595 out: 596 rtfree(ro.ro_rt); 597 return nxt; 598 } 599 600 int 601 ip_fragcheck(struct mbuf **mp, int *offp) 602 { 603 struct ip *ip; 604 struct ipq *fp; 605 struct ipqent *ipqe; 606 int hlen; 607 uint16_t mff; 608 609 ip = mtod(*mp, struct ip *); 610 hlen = ip->ip_hl << 2; 611 612 /* 613 * If offset or more fragments are set, must reassemble. 614 * Otherwise, nothing need be done. 615 * (We could look in the reassembly queue to see 616 * if the packet was previously fragmented, 617 * but it's not worth the time; just let them time out.) 618 */ 619 if (ISSET(ip->ip_off, htons(IP_OFFMASK | IP_MF))) { 620 if ((*mp)->m_flags & M_EXT) { /* XXX */ 621 if ((*mp = m_pullup(*mp, hlen)) == NULL) { 622 ipstat_inc(ips_toosmall); 623 return IPPROTO_DONE; 624 } 625 ip = mtod(*mp, struct ip *); 626 } 627 628 /* 629 * Adjust ip_len to not reflect header, 630 * set ipqe_mff if more fragments are expected, 631 * convert offset of this to bytes. 632 */ 633 ip->ip_len = htons(ntohs(ip->ip_len) - hlen); 634 mff = ISSET(ip->ip_off, htons(IP_MF)); 635 if (mff) { 636 /* 637 * Make sure that fragments have a data length 638 * that's a non-zero multiple of 8 bytes. 639 */ 640 if (ntohs(ip->ip_len) == 0 || 641 (ntohs(ip->ip_len) & 0x7) != 0) { 642 ipstat_inc(ips_badfrags); 643 m_freemp(mp); 644 return IPPROTO_DONE; 645 } 646 } 647 ip->ip_off = htons(ntohs(ip->ip_off) << 3); 648 649 mtx_enter(&ipq_mutex); 650 651 /* 652 * Look for queue of fragments 653 * of this datagram. 654 */ 655 LIST_FOREACH(fp, &ipq, ipq_q) { 656 if (ip->ip_id == fp->ipq_id && 657 ip->ip_src.s_addr == fp->ipq_src.s_addr && 658 ip->ip_dst.s_addr == fp->ipq_dst.s_addr && 659 ip->ip_p == fp->ipq_p) 660 break; 661 } 662 663 /* 664 * If datagram marked as having more fragments 665 * or if this is not the first fragment, 666 * attempt reassembly; if it succeeds, proceed. 
667 */ 668 if (mff || ip->ip_off) { 669 ipstat_inc(ips_fragments); 670 if (ip_frags + 1 > ip_maxqueue) { 671 ip_flush(); 672 ipstat_inc(ips_rcvmemdrop); 673 goto bad; 674 } 675 676 ipqe = pool_get(&ipqent_pool, PR_NOWAIT); 677 if (ipqe == NULL) { 678 ipstat_inc(ips_rcvmemdrop); 679 goto bad; 680 } 681 ip_frags++; 682 ipqe->ipqe_mff = mff; 683 ipqe->ipqe_m = *mp; 684 ipqe->ipqe_ip = ip; 685 *mp = ip_reass(ipqe, fp); 686 if (*mp == NULL) 687 goto bad; 688 ipstat_inc(ips_reassembled); 689 ip = mtod(*mp, struct ip *); 690 hlen = ip->ip_hl << 2; 691 ip->ip_len = htons(ntohs(ip->ip_len) + hlen); 692 } else { 693 if (fp != NULL) 694 ip_freef(fp); 695 } 696 697 mtx_leave(&ipq_mutex); 698 } 699 700 *offp = hlen; 701 return ip->ip_p; 702 703 bad: 704 mtx_leave(&ipq_mutex); 705 m_freemp(mp); 706 return IPPROTO_DONE; 707 } 708 709 #ifndef INET6 710 #define IPSTAT_INC(name) ipstat_inc(ips_##name) 711 #else 712 #define IPSTAT_INC(name) (af == AF_INET ? \ 713 ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name)) 714 #endif 715 716 int 717 ip_deliver(struct mbuf **mp, int *offp, int nxt, int af, int shared) 718 { 719 #ifdef INET6 720 int nest = 0; 721 #endif 722 723 /* 724 * Tell launch routine the next header 725 */ 726 IPSTAT_INC(delivered); 727 728 while (nxt != IPPROTO_DONE) { 729 const struct protosw *psw; 730 int naf; 731 732 switch (af) { 733 case AF_INET: 734 psw = &inetsw[ip_protox[nxt]]; 735 break; 736 #ifdef INET6 737 case AF_INET6: 738 psw = &inet6sw[ip6_protox[nxt]]; 739 break; 740 #endif 741 } 742 if (shared && !ISSET(psw->pr_flags, PR_MPINPUT)) { 743 /* delivery not finished, decrement counter, queue */ 744 switch (af) { 745 case AF_INET: 746 counters_dec(ipcounters, ips_delivered); 747 break; 748 #ifdef INET6 749 case AF_INET6: 750 counters_dec(ip6counters, ip6s_delivered); 751 break; 752 #endif 753 } 754 break; 755 } 756 757 #ifdef INET6 758 if (af == AF_INET6 && 759 ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) { 760 ip6stat_inc(ip6s_toomanyhdr); 761 goto bad; 762 } 763 #endif 764 765 /* 766 * protection against faulty packet - there should be 767 * more sanity checks in header chain processing. 768 */ 769 if ((*mp)->m_pkthdr.len < *offp) { 770 IPSTAT_INC(tooshort); 771 goto bad; 772 } 773 774 #ifdef IPSEC 775 if (ipsec_in_use) { 776 if (ipsec_local_check(*mp, *offp, nxt, af) != 0) { 777 IPSTAT_INC(cantforward); 778 goto bad; 779 } 780 } 781 /* Otherwise, just fall through and deliver the packet */ 782 #endif 783 784 switch (nxt) { 785 case IPPROTO_IPV4: 786 naf = AF_INET; 787 ipstat_inc(ips_delivered); 788 break; 789 #ifdef INET6 790 case IPPROTO_IPV6: 791 naf = AF_INET6; 792 ip6stat_inc(ip6s_delivered); 793 break; 794 #endif 795 default: 796 naf = af; 797 break; 798 } 799 nxt = (*psw->pr_input)(mp, offp, nxt, af); 800 af = naf; 801 } 802 return nxt; 803 bad: 804 m_freemp(mp); 805 return IPPROTO_DONE; 806 } 807 #undef IPSTAT_INC 808 809 int 810 in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct route *ro) 811 { 812 struct rtentry *rt; 813 struct ip *ip; 814 int match = 0; 815 816 #if NPF > 0 817 switch (pf_ouraddr(m)) { 818 case 0: 819 return (0); 820 case 1: 821 return (1); 822 default: 823 /* pf does not know it */ 824 break; 825 } 826 #endif 827 828 ip = mtod(m, struct ip *); 829 830 rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, m->m_pkthdr.ph_rtableid); 831 if (rt != NULL) { 832 if (ISSET(rt->rt_flags, RTF_LOCAL)) 833 match = 1; 834 835 /* 836 * If directedbcast is enabled we only consider it local 837 * if it is received on the interface with that address. 
838 */ 839 if (ISSET(rt->rt_flags, RTF_BROADCAST) && 840 (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) { 841 match = 1; 842 843 /* Make sure M_BCAST is set */ 844 m->m_flags |= M_BCAST; 845 } 846 } 847 848 if (!match) { 849 struct ifaddr *ifa; 850 851 /* 852 * No local address or broadcast address found, so check for 853 * ancient classful broadcast addresses. 854 * It must have been broadcast on the link layer, and for an 855 * address on the interface it was received on. 856 */ 857 if (!ISSET(m->m_flags, M_BCAST) || 858 !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr)) 859 return (0); 860 861 if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid)) 862 return (0); 863 /* 864 * The check in the loop assumes you only rx a packet on an UP 865 * interface, and that M_BCAST will only be set on a BROADCAST 866 * interface. 867 */ 868 NET_ASSERT_LOCKED(); 869 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) { 870 if (ifa->ifa_addr->sa_family != AF_INET) 871 continue; 872 873 if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, 874 ifatoia(ifa)->ia_addr.sin_addr.s_addr)) { 875 match = 1; 876 break; 877 } 878 } 879 } else if (ipforwarding == 0 && rt->rt_ifidx != ifp->if_index && 880 !((ifp->if_flags & IFF_LOOPBACK) || (ifp->if_type == IFT_ENC) || 881 (m->m_pkthdr.pf.flags & PF_TAG_TRANSLATE_LOCALHOST))) { 882 /* received on wrong interface. */ 883 #if NCARP > 0 884 struct ifnet *out_if; 885 886 /* 887 * Virtual IPs on carp interfaces need to be checked also 888 * against the parent interface and other carp interfaces 889 * sharing the same parent. 890 */ 891 out_if = if_get(rt->rt_ifidx); 892 if (!(out_if && carp_strict_addr_chk(out_if, ifp))) { 893 ipstat_inc(ips_wrongif); 894 match = 2; 895 } 896 if_put(out_if); 897 #else 898 ipstat_inc(ips_wrongif); 899 match = 2; 900 #endif 901 } 902 903 return (match); 904 } 905 906 /* 907 * Take incoming datagram fragment and try to 908 * reassemble it into whole datagram. If a chain for 909 * reassembly of this datagram already exists, then it 910 * is given as fp; otherwise have to make a chain. 911 */ 912 struct mbuf * 913 ip_reass(struct ipqent *ipqe, struct ipq *fp) 914 { 915 struct mbuf *m = ipqe->ipqe_m; 916 struct ipqent *nq, *p, *q; 917 struct ip *ip; 918 struct mbuf *t; 919 int hlen = ipqe->ipqe_ip->ip_hl << 2; 920 int i, next; 921 u_int8_t ecn, ecn0; 922 923 MUTEX_ASSERT_LOCKED(&ipq_mutex); 924 925 /* 926 * Presence of header sizes in mbufs 927 * would confuse code below. 928 */ 929 m->m_data += hlen; 930 m->m_len -= hlen; 931 932 /* 933 * If first fragment to arrive, create a reassembly queue. 934 */ 935 if (fp == NULL) { 936 fp = pool_get(&ipq_pool, PR_NOWAIT); 937 if (fp == NULL) 938 goto dropfrag; 939 LIST_INSERT_HEAD(&ipq, fp, ipq_q); 940 fp->ipq_ttl = IPFRAGTTL; 941 fp->ipq_p = ipqe->ipqe_ip->ip_p; 942 fp->ipq_id = ipqe->ipqe_ip->ip_id; 943 LIST_INIT(&fp->ipq_fragq); 944 fp->ipq_src = ipqe->ipqe_ip->ip_src; 945 fp->ipq_dst = ipqe->ipqe_ip->ip_dst; 946 p = NULL; 947 goto insert; 948 } 949 950 /* 951 * Handle ECN by comparing this segment with the first one; 952 * if CE is set, do not lose CE. 953 * drop if CE and not-ECT are mixed for the same packet. 
954 */ 955 ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK; 956 ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK; 957 if (ecn == IPTOS_ECN_CE) { 958 if (ecn0 == IPTOS_ECN_NOTECT) 959 goto dropfrag; 960 if (ecn0 != IPTOS_ECN_CE) 961 LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |= 962 IPTOS_ECN_CE; 963 } 964 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) 965 goto dropfrag; 966 967 /* 968 * Find a segment which begins after this one does. 969 */ 970 for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL; 971 p = q, q = LIST_NEXT(q, ipqe_q)) 972 if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off)) 973 break; 974 975 /* 976 * If there is a preceding segment, it may provide some of 977 * our data already. If so, drop the data from the incoming 978 * segment. If it provides all of our data, drop us. 979 */ 980 if (p != NULL) { 981 i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) - 982 ntohs(ipqe->ipqe_ip->ip_off); 983 if (i > 0) { 984 if (i >= ntohs(ipqe->ipqe_ip->ip_len)) 985 goto dropfrag; 986 m_adj(ipqe->ipqe_m, i); 987 ipqe->ipqe_ip->ip_off = 988 htons(ntohs(ipqe->ipqe_ip->ip_off) + i); 989 ipqe->ipqe_ip->ip_len = 990 htons(ntohs(ipqe->ipqe_ip->ip_len) - i); 991 } 992 } 993 994 /* 995 * While we overlap succeeding segments trim them or, 996 * if they are completely covered, dequeue them. 997 */ 998 for (; q != NULL && 999 ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) > 1000 ntohs(q->ipqe_ip->ip_off); q = nq) { 1001 i = (ntohs(ipqe->ipqe_ip->ip_off) + 1002 ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off); 1003 if (i < ntohs(q->ipqe_ip->ip_len)) { 1004 q->ipqe_ip->ip_len = 1005 htons(ntohs(q->ipqe_ip->ip_len) - i); 1006 q->ipqe_ip->ip_off = 1007 htons(ntohs(q->ipqe_ip->ip_off) + i); 1008 m_adj(q->ipqe_m, i); 1009 break; 1010 } 1011 nq = LIST_NEXT(q, ipqe_q); 1012 m_freem(q->ipqe_m); 1013 LIST_REMOVE(q, ipqe_q); 1014 pool_put(&ipqent_pool, q); 1015 ip_frags--; 1016 } 1017 1018 insert: 1019 /* 1020 * Stick new segment in its place; 1021 * check for complete reassembly. 1022 */ 1023 if (p == NULL) { 1024 LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q); 1025 } else { 1026 LIST_INSERT_AFTER(p, ipqe, ipqe_q); 1027 } 1028 next = 0; 1029 for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL; 1030 p = q, q = LIST_NEXT(q, ipqe_q)) { 1031 if (ntohs(q->ipqe_ip->ip_off) != next) 1032 return (0); 1033 next += ntohs(q->ipqe_ip->ip_len); 1034 } 1035 if (p->ipqe_mff) 1036 return (0); 1037 1038 /* 1039 * Reassembly is complete. Check for a bogus message size and 1040 * concatenate fragments. 1041 */ 1042 q = LIST_FIRST(&fp->ipq_fragq); 1043 ip = q->ipqe_ip; 1044 if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) { 1045 ipstat_inc(ips_toolong); 1046 ip_freef(fp); 1047 return (0); 1048 } 1049 m = q->ipqe_m; 1050 t = m->m_next; 1051 m->m_next = 0; 1052 m_cat(m, t); 1053 nq = LIST_NEXT(q, ipqe_q); 1054 pool_put(&ipqent_pool, q); 1055 ip_frags--; 1056 for (q = nq; q != NULL; q = nq) { 1057 t = q->ipqe_m; 1058 nq = LIST_NEXT(q, ipqe_q); 1059 pool_put(&ipqent_pool, q); 1060 ip_frags--; 1061 m_removehdr(t); 1062 m_cat(m, t); 1063 } 1064 1065 /* 1066 * Create header for new ip packet by 1067 * modifying header of first packet; 1068 * dequeue and discard fragment reassembly header. 1069 * Make header visible. 
1070 */ 1071 ip->ip_len = htons(next); 1072 ip->ip_src = fp->ipq_src; 1073 ip->ip_dst = fp->ipq_dst; 1074 LIST_REMOVE(fp, ipq_q); 1075 pool_put(&ipq_pool, fp); 1076 m->m_len += (ip->ip_hl << 2); 1077 m->m_data -= (ip->ip_hl << 2); 1078 m_calchdrlen(m); 1079 return (m); 1080 1081 dropfrag: 1082 ipstat_inc(ips_fragdropped); 1083 m_freem(m); 1084 pool_put(&ipqent_pool, ipqe); 1085 ip_frags--; 1086 return (NULL); 1087 } 1088 1089 /* 1090 * Free a fragment reassembly header and all 1091 * associated datagrams. 1092 */ 1093 void 1094 ip_freef(struct ipq *fp) 1095 { 1096 struct ipqent *q; 1097 1098 MUTEX_ASSERT_LOCKED(&ipq_mutex); 1099 1100 while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) { 1101 LIST_REMOVE(q, ipqe_q); 1102 m_freem(q->ipqe_m); 1103 pool_put(&ipqent_pool, q); 1104 ip_frags--; 1105 } 1106 LIST_REMOVE(fp, ipq_q); 1107 pool_put(&ipq_pool, fp); 1108 } 1109 1110 /* 1111 * IP timer processing; 1112 * if a timer expires on a reassembly queue, discard it. 1113 */ 1114 void 1115 ip_slowtimo(void) 1116 { 1117 struct ipq *fp, *nfp; 1118 1119 mtx_enter(&ipq_mutex); 1120 LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) { 1121 if (--fp->ipq_ttl == 0) { 1122 ipstat_inc(ips_fragtimeout); 1123 ip_freef(fp); 1124 } 1125 } 1126 mtx_leave(&ipq_mutex); 1127 } 1128 1129 /* 1130 * Flush a bunch of datagram fragments, till we are down to 75%. 1131 */ 1132 void 1133 ip_flush(void) 1134 { 1135 int max = 50; 1136 1137 MUTEX_ASSERT_LOCKED(&ipq_mutex); 1138 1139 while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) { 1140 ipstat_inc(ips_fragdropped); 1141 ip_freef(LIST_FIRST(&ipq)); 1142 } 1143 } 1144 1145 /* 1146 * Do option processing on a datagram, 1147 * possibly discarding it if bad options are encountered, 1148 * or forwarding it if source-routed. 1149 * Returns 1 if packet has been forwarded/freed, 1150 * 0 if the packet should be processed further. 1151 */ 1152 int 1153 ip_dooptions(struct mbuf *m, struct ifnet *ifp) 1154 { 1155 struct ip *ip = mtod(m, struct ip *); 1156 unsigned int rtableid = m->m_pkthdr.ph_rtableid; 1157 struct rtentry *rt; 1158 struct sockaddr_in ipaddr; 1159 u_char *cp; 1160 struct ip_timestamp ipt; 1161 struct in_ifaddr *ia; 1162 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; 1163 struct in_addr sin, dst; 1164 u_int32_t ntime; 1165 1166 dst = ip->ip_dst; 1167 cp = (u_char *)(ip + 1); 1168 cnt = (ip->ip_hl << 2) - sizeof (struct ip); 1169 1170 KERNEL_LOCK(); 1171 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1172 opt = cp[IPOPT_OPTVAL]; 1173 if (opt == IPOPT_EOL) 1174 break; 1175 if (opt == IPOPT_NOP) 1176 optlen = 1; 1177 else { 1178 if (cnt < IPOPT_OLEN + sizeof(*cp)) { 1179 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1180 goto bad; 1181 } 1182 optlen = cp[IPOPT_OLEN]; 1183 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { 1184 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1185 goto bad; 1186 } 1187 } 1188 1189 switch (opt) { 1190 1191 default: 1192 break; 1193 1194 /* 1195 * Source routing with record. 1196 * Find interface with current destination address. 1197 * If none on this machine then drop if strictly routed, 1198 * or do nothing if loosely routed. 1199 * Record interface address and bring up next address 1200 * component. If strictly routed make sure next 1201 * address is on directly accessible net. 
1202 */ 1203 case IPOPT_LSRR: 1204 case IPOPT_SSRR: 1205 if (!ip_dosourceroute) { 1206 type = ICMP_UNREACH; 1207 code = ICMP_UNREACH_SRCFAIL; 1208 goto bad; 1209 } 1210 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1211 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1212 goto bad; 1213 } 1214 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1215 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1216 goto bad; 1217 } 1218 memset(&ipaddr, 0, sizeof(ipaddr)); 1219 ipaddr.sin_family = AF_INET; 1220 ipaddr.sin_len = sizeof(ipaddr); 1221 ipaddr.sin_addr = ip->ip_dst; 1222 ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr), 1223 m->m_pkthdr.ph_rtableid)); 1224 if (ia == NULL) { 1225 if (opt == IPOPT_SSRR) { 1226 type = ICMP_UNREACH; 1227 code = ICMP_UNREACH_SRCFAIL; 1228 goto bad; 1229 } 1230 /* 1231 * Loose routing, and not at next destination 1232 * yet; nothing to do except forward. 1233 */ 1234 break; 1235 } 1236 off--; /* 0 origin */ 1237 if ((off + sizeof(struct in_addr)) > optlen) { 1238 /* 1239 * End of source route. Should be for us. 1240 */ 1241 save_rte(m, cp, ip->ip_src); 1242 break; 1243 } 1244 1245 /* 1246 * locate outgoing interface 1247 */ 1248 memset(&ipaddr, 0, sizeof(ipaddr)); 1249 ipaddr.sin_family = AF_INET; 1250 ipaddr.sin_len = sizeof(ipaddr); 1251 memcpy(&ipaddr.sin_addr, cp + off, 1252 sizeof(ipaddr.sin_addr)); 1253 /* keep packet in the virtual instance */ 1254 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid); 1255 if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) && 1256 ISSET(rt->rt_flags, RTF_GATEWAY))) { 1257 type = ICMP_UNREACH; 1258 code = ICMP_UNREACH_SRCFAIL; 1259 rtfree(rt); 1260 goto bad; 1261 } 1262 ia = ifatoia(rt->rt_ifa); 1263 memcpy(cp + off, &ia->ia_addr.sin_addr, 1264 sizeof(struct in_addr)); 1265 rtfree(rt); 1266 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1267 ip->ip_dst = ipaddr.sin_addr; 1268 /* 1269 * Let ip_intr's mcast routing check handle mcast pkts 1270 */ 1271 forward = !IN_MULTICAST(ip->ip_dst.s_addr); 1272 break; 1273 1274 case IPOPT_RR: 1275 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1276 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1277 goto bad; 1278 } 1279 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1280 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1281 goto bad; 1282 } 1283 1284 /* 1285 * If no space remains, ignore. 1286 */ 1287 off--; /* 0 origin */ 1288 if ((off + sizeof(struct in_addr)) > optlen) 1289 break; 1290 memset(&ipaddr, 0, sizeof(ipaddr)); 1291 ipaddr.sin_family = AF_INET; 1292 ipaddr.sin_len = sizeof(ipaddr); 1293 ipaddr.sin_addr = ip->ip_dst; 1294 /* 1295 * locate outgoing interface; if we're the destination, 1296 * use the incoming interface (should be same). 1297 * Again keep the packet inside the virtual instance. 
1298 */ 1299 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid); 1300 if (!rtisvalid(rt)) { 1301 type = ICMP_UNREACH; 1302 code = ICMP_UNREACH_HOST; 1303 rtfree(rt); 1304 goto bad; 1305 } 1306 ia = ifatoia(rt->rt_ifa); 1307 memcpy(cp + off, &ia->ia_addr.sin_addr, 1308 sizeof(struct in_addr)); 1309 rtfree(rt); 1310 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1311 break; 1312 1313 case IPOPT_TS: 1314 code = cp - (u_char *)ip; 1315 if (optlen < sizeof(struct ip_timestamp)) 1316 goto bad; 1317 memcpy(&ipt, cp, sizeof(struct ip_timestamp)); 1318 if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5) 1319 goto bad; 1320 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) { 1321 if (++ipt.ipt_oflw == 0) 1322 goto bad; 1323 break; 1324 } 1325 memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin); 1326 switch (ipt.ipt_flg) { 1327 1328 case IPOPT_TS_TSONLY: 1329 break; 1330 1331 case IPOPT_TS_TSANDADDR: 1332 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1333 sizeof(struct in_addr) > ipt.ipt_len) 1334 goto bad; 1335 memset(&ipaddr, 0, sizeof(ipaddr)); 1336 ipaddr.sin_family = AF_INET; 1337 ipaddr.sin_len = sizeof(ipaddr); 1338 ipaddr.sin_addr = dst; 1339 ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr), 1340 ifp)); 1341 if (ia == NULL) 1342 continue; 1343 memcpy(&sin, &ia->ia_addr.sin_addr, 1344 sizeof(struct in_addr)); 1345 ipt.ipt_ptr += sizeof(struct in_addr); 1346 break; 1347 1348 case IPOPT_TS_PRESPEC: 1349 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1350 sizeof(struct in_addr) > ipt.ipt_len) 1351 goto bad; 1352 memset(&ipaddr, 0, sizeof(ipaddr)); 1353 ipaddr.sin_family = AF_INET; 1354 ipaddr.sin_len = sizeof(ipaddr); 1355 ipaddr.sin_addr = sin; 1356 if (ifa_ifwithaddr(sintosa(&ipaddr), 1357 m->m_pkthdr.ph_rtableid) == NULL) 1358 continue; 1359 ipt.ipt_ptr += sizeof(struct in_addr); 1360 break; 1361 1362 default: 1363 /* XXX can't take &ipt->ipt_flg */ 1364 code = (u_char *)&ipt.ipt_ptr - 1365 (u_char *)ip + 1; 1366 goto bad; 1367 } 1368 ntime = iptime(); 1369 memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t)); 1370 ipt.ipt_ptr += sizeof(u_int32_t); 1371 } 1372 } 1373 KERNEL_UNLOCK(); 1374 if (forward && ipforwarding > 0) { 1375 ip_forward(m, ifp, NULL, 1); 1376 return (1); 1377 } 1378 return (0); 1379 bad: 1380 KERNEL_UNLOCK(); 1381 icmp_error(m, type, code, 0, 0); 1382 ipstat_inc(ips_badoptions); 1383 return (1); 1384 } 1385 1386 /* 1387 * Save incoming source route for use in replies, 1388 * to be picked up later by ip_srcroute if the receiver is interested. 1389 */ 1390 void 1391 save_rte(struct mbuf *m, u_char *option, struct in_addr dst) 1392 { 1393 struct ip_srcrt *isr; 1394 struct m_tag *mtag; 1395 unsigned olen; 1396 1397 olen = option[IPOPT_OLEN]; 1398 if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes)) 1399 return; 1400 1401 mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT); 1402 if (mtag == NULL) { 1403 ipstat_inc(ips_idropped); 1404 return; 1405 } 1406 isr = (struct ip_srcrt *)(mtag + 1); 1407 1408 memcpy(isr->isr_hdr, option, olen); 1409 isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); 1410 isr->isr_dst = dst; 1411 m_tag_prepend(m, mtag); 1412 } 1413 1414 /* 1415 * Retrieve incoming source route for use in replies, 1416 * in the same form used by setsockopt. 1417 * The first hop is placed before the options, will be removed later. 
1418 */ 1419 struct mbuf * 1420 ip_srcroute(struct mbuf *m0) 1421 { 1422 struct in_addr *p, *q; 1423 struct mbuf *m; 1424 struct ip_srcrt *isr; 1425 struct m_tag *mtag; 1426 1427 if (!ip_dosourceroute) 1428 return (NULL); 1429 1430 mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL); 1431 if (mtag == NULL) 1432 return (NULL); 1433 isr = (struct ip_srcrt *)(mtag + 1); 1434 1435 if (isr->isr_nhops == 0) 1436 return (NULL); 1437 m = m_get(M_DONTWAIT, MT_SOOPTS); 1438 if (m == NULL) { 1439 ipstat_inc(ips_idropped); 1440 return (NULL); 1441 } 1442 1443 #define OPTSIZ (sizeof(isr->isr_nop) + sizeof(isr->isr_hdr)) 1444 1445 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */ 1446 m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ; 1447 1448 /* 1449 * First save first hop for return route 1450 */ 1451 p = &(isr->isr_routes[isr->isr_nhops - 1]); 1452 *(mtod(m, struct in_addr *)) = *p--; 1453 1454 /* 1455 * Copy option fields and padding (nop) to mbuf. 1456 */ 1457 isr->isr_nop = IPOPT_NOP; 1458 isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF; 1459 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop, 1460 OPTSIZ); 1461 q = (struct in_addr *)(mtod(m, caddr_t) + 1462 sizeof(struct in_addr) + OPTSIZ); 1463 #undef OPTSIZ 1464 /* 1465 * Record return path as an IP source route, 1466 * reversing the path (pointers are now aligned). 1467 */ 1468 while (p >= isr->isr_routes) { 1469 *q++ = *p--; 1470 } 1471 /* 1472 * Last hop goes to final destination. 1473 */ 1474 *q = isr->isr_dst; 1475 m_tag_delete(m0, (struct m_tag *)isr); 1476 return (m); 1477 } 1478 1479 /* 1480 * Strip out IP options, at higher level protocol in the kernel. 1481 */ 1482 void 1483 ip_stripoptions(struct mbuf *m) 1484 { 1485 int i; 1486 struct ip *ip = mtod(m, struct ip *); 1487 caddr_t opts; 1488 int olen; 1489 1490 olen = (ip->ip_hl<<2) - sizeof (struct ip); 1491 opts = (caddr_t)(ip + 1); 1492 i = m->m_len - (sizeof (struct ip) + olen); 1493 memmove(opts, opts + olen, i); 1494 m->m_len -= olen; 1495 if (m->m_flags & M_PKTHDR) 1496 m->m_pkthdr.len -= olen; 1497 ip->ip_hl = sizeof(struct ip) >> 2; 1498 ip->ip_len = htons(ntohs(ip->ip_len) - olen); 1499 } 1500 1501 const u_char inetctlerrmap[PRC_NCMDS] = { 1502 0, 0, 0, 0, 1503 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1504 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1505 EMSGSIZE, EHOSTUNREACH, 0, 0, 1506 0, 0, 0, 0, 1507 ENOPROTOOPT 1508 }; 1509 1510 /* 1511 * Forward a packet. If some error occurs return the sender 1512 * an icmp packet. Note we can't always generate a meaningful 1513 * icmp message because icmp doesn't have a large enough repertoire 1514 * of codes and types. 1515 * 1516 * If not forwarding, just drop the packet. This could be confusing 1517 * if ipforwarding was zero but some routing protocol was advancing 1518 * us as a gateway to somewhere. However, we must let the routing 1519 * protocol deal with that. 1520 * 1521 * The srcrt parameter indicates whether the packet is being forwarded 1522 * via a source route. 
1523 */ 1524 void 1525 ip_forward(struct mbuf *m, struct ifnet *ifp, struct route *ro, int srcrt) 1526 { 1527 struct mbuf mfake, *mcopy; 1528 struct ip *ip = mtod(m, struct ip *); 1529 struct route iproute; 1530 struct rtentry *rt; 1531 int error = 0, type = 0, code = 0, destmtu = 0, fake = 0, len; 1532 u_int32_t dest; 1533 1534 dest = 0; 1535 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 1536 ipstat_inc(ips_cantforward); 1537 m_freem(m); 1538 goto done; 1539 } 1540 if (ip->ip_ttl <= IPTTLDEC) { 1541 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); 1542 goto done; 1543 } 1544 1545 if (ro == NULL) { 1546 ro = &iproute; 1547 ro->ro_rt = NULL; 1548 } 1549 rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, m->m_pkthdr.ph_rtableid); 1550 if (rt == NULL) { 1551 ipstat_inc(ips_noroute); 1552 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0); 1553 goto done; 1554 } 1555 1556 /* 1557 * Save at most 68 bytes of the packet in case 1558 * we need to generate an ICMP message to the src. 1559 * The data is saved in the mbuf on the stack that 1560 * acts as a temporary storage not intended to be 1561 * passed down the IP stack or to the mfree. 1562 */ 1563 memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr)); 1564 mfake.m_type = m->m_type; 1565 if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) { 1566 mfake.m_data = mfake.m_pktdat; 1567 len = min(ntohs(ip->ip_len), 68); 1568 m_copydata(m, 0, len, mfake.m_pktdat); 1569 mfake.m_pkthdr.len = mfake.m_len = len; 1570 #if NPF > 0 1571 pf_pkt_addr_changed(&mfake); 1572 #endif /* NPF > 0 */ 1573 fake = 1; 1574 } 1575 1576 ip->ip_ttl -= IPTTLDEC; 1577 1578 /* 1579 * If forwarding packet using same interface that it came in on, 1580 * perhaps should send a redirect to sender to shortcut a hop. 1581 * Only send redirect if source is sending directly to us, 1582 * and if packet was not source routed (or has any options). 1583 * Also, don't send redirect if forwarding using a default route 1584 * or a route modified by a redirect. 1585 * Don't send redirect if we advertise destination's arp address 1586 * as ours (proxy arp). 1587 */ 1588 if ((rt->rt_ifidx == ifp->if_index) && 1589 (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && 1590 satosin(rt_key(rt))->sin_addr.s_addr != 0 && 1591 ipsendredirects && !srcrt && 1592 !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) { 1593 if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) == 1594 ifatoia(rt->rt_ifa)->ia_net) { 1595 if (rt->rt_flags & RTF_GATEWAY) 1596 dest = satosin(rt->rt_gateway)->sin_addr.s_addr; 1597 else 1598 dest = ip->ip_dst.s_addr; 1599 /* Router requirements says to only send host redirects */ 1600 type = ICMP_REDIRECT; 1601 code = ICMP_REDIRECT_HOST; 1602 } 1603 } 1604 1605 error = ip_output(m, NULL, ro, 1606 (IP_FORWARDING | (ip_directedbcast ? 
IP_ALLOWBROADCAST : 0)), 1607 NULL, NULL, 0); 1608 rt = ro->ro_rt; 1609 if (error) 1610 ipstat_inc(ips_cantforward); 1611 else { 1612 ipstat_inc(ips_forward); 1613 if (type) 1614 ipstat_inc(ips_redirectsent); 1615 else 1616 goto done; 1617 } 1618 if (!fake) 1619 goto done; 1620 1621 switch (error) { 1622 case 0: /* forwarded, but need redirect */ 1623 /* type, code set above */ 1624 break; 1625 1626 case EMSGSIZE: 1627 type = ICMP_UNREACH; 1628 code = ICMP_UNREACH_NEEDFRAG; 1629 if (rt != NULL) { 1630 if (rt->rt_mtu) { 1631 destmtu = rt->rt_mtu; 1632 } else { 1633 struct ifnet *destifp; 1634 1635 destifp = if_get(rt->rt_ifidx); 1636 if (destifp != NULL) 1637 destmtu = destifp->if_mtu; 1638 if_put(destifp); 1639 } 1640 } 1641 ipstat_inc(ips_cantfrag); 1642 if (destmtu == 0) 1643 goto done; 1644 break; 1645 1646 case EACCES: 1647 /* 1648 * pf(4) blocked the packet. There is no need to send an ICMP 1649 * packet back since pf(4) takes care of it. 1650 */ 1651 goto done; 1652 1653 case ENOBUFS: 1654 /* 1655 * a router should not generate ICMP_SOURCEQUENCH as 1656 * required in RFC1812 Requirements for IP Version 4 Routers. 1657 * source quench could be a big problem under DoS attacks, 1658 * or the underlying interface is rate-limited. 1659 */ 1660 goto done; 1661 1662 case ENETUNREACH: /* shouldn't happen, checked above */ 1663 case EHOSTUNREACH: 1664 case ENETDOWN: 1665 case EHOSTDOWN: 1666 default: 1667 type = ICMP_UNREACH; 1668 code = ICMP_UNREACH_HOST; 1669 break; 1670 } 1671 mcopy = m_copym(&mfake, 0, len, M_DONTWAIT); 1672 if (mcopy != NULL) 1673 icmp_error(mcopy, type, code, dest, destmtu); 1674 1675 done: 1676 if (ro == &iproute) 1677 rtfree(ro->ro_rt); 1678 if (fake) 1679 m_tag_delete_chain(&mfake); 1680 } 1681 1682 int 1683 ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 1684 size_t newlen) 1685 { 1686 #ifdef MROUTING 1687 extern struct mrtstat mrtstat; 1688 #endif 1689 int oldval, error; 1690 1691 /* Almost all sysctl names at this level are terminal. 
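	 * The exceptions are IPCTL_IFQUEUE and IPCTL_ARPQUEUE, which take
	 * one more name component handled by sysctl_niq() (e.g. the
	 * net.inet.ip.ifq.* nodes).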
*/ 1692 if (namelen != 1 && name[0] != IPCTL_IFQUEUE && 1693 name[0] != IPCTL_ARPQUEUE) 1694 return (ENOTDIR); 1695 1696 switch (name[0]) { 1697 case IPCTL_SOURCEROUTE: 1698 NET_LOCK(); 1699 error = sysctl_securelevel_int(oldp, oldlenp, newp, newlen, 1700 &ip_dosourceroute); 1701 NET_UNLOCK(); 1702 return (error); 1703 case IPCTL_MTUDISC: 1704 NET_LOCK(); 1705 error = sysctl_int(oldp, oldlenp, newp, newlen, &ip_mtudisc); 1706 if (ip_mtudisc == 0) 1707 rt_timer_queue_flush(&ip_mtudisc_timeout_q); 1708 NET_UNLOCK(); 1709 return error; 1710 case IPCTL_MTUDISCTIMEOUT: 1711 NET_LOCK(); 1712 error = sysctl_int_bounded(oldp, oldlenp, newp, newlen, 1713 &ip_mtudisc_timeout, 0, INT_MAX); 1714 rt_timer_queue_change(&ip_mtudisc_timeout_q, 1715 ip_mtudisc_timeout); 1716 NET_UNLOCK(); 1717 return (error); 1718 #ifdef IPSEC 1719 case IPCTL_ENCDEBUG: 1720 case IPCTL_IPSEC_STATS: 1721 case IPCTL_IPSEC_EXPIRE_ACQUIRE: 1722 case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT: 1723 case IPCTL_IPSEC_REQUIRE_PFS: 1724 case IPCTL_IPSEC_SOFT_ALLOCATIONS: 1725 case IPCTL_IPSEC_ALLOCATIONS: 1726 case IPCTL_IPSEC_SOFT_BYTES: 1727 case IPCTL_IPSEC_BYTES: 1728 case IPCTL_IPSEC_TIMEOUT: 1729 case IPCTL_IPSEC_SOFT_TIMEOUT: 1730 case IPCTL_IPSEC_SOFT_FIRSTUSE: 1731 case IPCTL_IPSEC_FIRSTUSE: 1732 case IPCTL_IPSEC_ENC_ALGORITHM: 1733 case IPCTL_IPSEC_AUTH_ALGORITHM: 1734 case IPCTL_IPSEC_IPCOMP_ALGORITHM: 1735 return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp, 1736 newlen)); 1737 #endif 1738 case IPCTL_IFQUEUE: 1739 return (sysctl_niq(name + 1, namelen - 1, 1740 oldp, oldlenp, newp, newlen, &ipintrq)); 1741 case IPCTL_ARPQUEUE: 1742 return (sysctl_niq(name + 1, namelen - 1, 1743 oldp, oldlenp, newp, newlen, &arpinq)); 1744 case IPCTL_ARPQUEUED: 1745 return (sysctl_rdint(oldp, oldlenp, newp, 1746 atomic_load_int(&la_hold_total))); 1747 case IPCTL_STATS: 1748 return (ip_sysctl_ipstat(oldp, oldlenp, newp)); 1749 #ifdef MROUTING 1750 case IPCTL_MRTSTATS: 1751 return (sysctl_rdstruct(oldp, oldlenp, newp, 1752 &mrtstat, sizeof(mrtstat))); 1753 case IPCTL_MRTMFC: 1754 if (newp) 1755 return (EPERM); 1756 NET_LOCK(); 1757 error = mrt_sysctl_mfc(oldp, oldlenp); 1758 NET_UNLOCK(); 1759 return (error); 1760 case IPCTL_MRTVIF: 1761 if (newp) 1762 return (EPERM); 1763 NET_LOCK(); 1764 error = mrt_sysctl_vif(oldp, oldlenp); 1765 NET_UNLOCK(); 1766 return (error); 1767 #else 1768 case IPCTL_MRTPROTO: 1769 case IPCTL_MRTSTATS: 1770 case IPCTL_MRTMFC: 1771 case IPCTL_MRTVIF: 1772 return (EOPNOTSUPP); 1773 #endif 1774 case IPCTL_MULTIPATH: 1775 NET_LOCK(); 1776 oldval = ipmultipath; 1777 error = sysctl_int_bounded(oldp, oldlenp, newp, newlen, 1778 &ipmultipath, 0, 1); 1779 if (oldval != ipmultipath) 1780 atomic_inc_long(&rtgeneration); 1781 NET_UNLOCK(); 1782 return (error); 1783 default: 1784 NET_LOCK(); 1785 error = sysctl_bounded_arr(ipctl_vars, nitems(ipctl_vars), 1786 name, namelen, oldp, oldlenp, newp, newlen); 1787 NET_UNLOCK(); 1788 return (error); 1789 } 1790 /* NOTREACHED */ 1791 } 1792 1793 int 1794 ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp) 1795 { 1796 uint64_t counters[ips_ncounters]; 1797 struct ipstat ipstat; 1798 u_long *words = (u_long *)&ipstat; 1799 int i; 1800 1801 CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long))); 1802 memset(&ipstat, 0, sizeof ipstat); 1803 counters_read(ipcounters, counters, nitems(counters), NULL); 1804 1805 for (i = 0; i < nitems(counters); i++) 1806 words[i] = (u_long)counters[i]; 1807 1808 return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat))); 1809 } 1810 
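/*
 * Build the control messages requested on the socket (SO_TIMESTAMP,
 * IP_RECVDSTADDR, IP_RECVIF, IP_RECVTTL, IP_RECVRTABLE, ...) and chain
 * them onto *mp so they reach the receiver together with the datagram.
 */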
1811 void 1812 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, 1813 struct mbuf *m) 1814 { 1815 if (inp->inp_socket->so_options & SO_TIMESTAMP) { 1816 struct timeval tv; 1817 1818 m_microtime(m, &tv); 1819 *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), 1820 SCM_TIMESTAMP, SOL_SOCKET); 1821 if (*mp) 1822 mp = &(*mp)->m_next; 1823 } 1824 1825 if (inp->inp_flags & INP_RECVDSTADDR) { 1826 *mp = sbcreatecontrol((caddr_t) &ip->ip_dst, 1827 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); 1828 if (*mp) 1829 mp = &(*mp)->m_next; 1830 } 1831 #ifdef notyet 1832 /* this code is broken and will probably never be fixed. */ 1833 /* options were tossed already */ 1834 if (inp->inp_flags & INP_RECVOPTS) { 1835 *mp = sbcreatecontrol((caddr_t) opts_deleted_above, 1836 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1837 if (*mp) 1838 mp = &(*mp)->m_next; 1839 } 1840 /* ip_srcroute doesn't do what we want here, need to fix */ 1841 if (inp->inp_flags & INP_RECVRETOPTS) { 1842 *mp = sbcreatecontrol((caddr_t) ip_srcroute(m), 1843 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1844 if (*mp) 1845 mp = &(*mp)->m_next; 1846 } 1847 #endif 1848 if (inp->inp_flags & INP_RECVIF) { 1849 struct sockaddr_dl sdl; 1850 struct ifnet *ifp; 1851 1852 ifp = if_get(m->m_pkthdr.ph_ifidx); 1853 if (ifp == NULL || ifp->if_sadl == NULL) { 1854 memset(&sdl, 0, sizeof(sdl)); 1855 sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]); 1856 sdl.sdl_family = AF_LINK; 1857 sdl.sdl_index = ifp != NULL ? ifp->if_index : 0; 1858 sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0; 1859 *mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len, 1860 IP_RECVIF, IPPROTO_IP); 1861 } else { 1862 *mp = sbcreatecontrol((caddr_t) ifp->if_sadl, 1863 ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP); 1864 } 1865 if (*mp) 1866 mp = &(*mp)->m_next; 1867 if_put(ifp); 1868 } 1869 if (inp->inp_flags & INP_RECVTTL) { 1870 *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl, 1871 sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP); 1872 if (*mp) 1873 mp = &(*mp)->m_next; 1874 } 1875 if (inp->inp_flags & INP_RECVRTABLE) { 1876 u_int rtableid = inp->inp_rtableid; 1877 1878 #if NPF > 0 1879 if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) { 1880 struct pf_divert *divert; 1881 1882 divert = pf_find_divert(m); 1883 KASSERT(divert != NULL); 1884 rtableid = divert->rdomain; 1885 } 1886 #endif 1887 1888 *mp = sbcreatecontrol((caddr_t) &rtableid, 1889 sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP); 1890 if (*mp) 1891 mp = &(*mp)->m_next; 1892 } 1893 } 1894 1895 void 1896 ip_send_do_dispatch(void *xmq, int flags) 1897 { 1898 struct mbuf_queue *mq = xmq; 1899 struct mbuf *m; 1900 struct mbuf_list ml; 1901 struct m_tag *mtag; 1902 1903 mq_delist(mq, &ml); 1904 if (ml_empty(&ml)) 1905 return; 1906 1907 NET_LOCK_SHARED(); 1908 while ((m = ml_dequeue(&ml)) != NULL) { 1909 u_int32_t ipsecflowinfo = 0; 1910 1911 if ((mtag = m_tag_find(m, PACKET_TAG_IPSEC_FLOWINFO, NULL)) 1912 != NULL) { 1913 ipsecflowinfo = *(u_int32_t *)(mtag + 1); 1914 m_tag_delete(m, mtag); 1915 } 1916 ip_output(m, NULL, NULL, flags, NULL, NULL, ipsecflowinfo); 1917 } 1918 NET_UNLOCK_SHARED(); 1919 } 1920 1921 void 1922 ip_sendraw_dispatch(void *xmq) 1923 { 1924 ip_send_do_dispatch(xmq, IP_RAWOUTPUT); 1925 } 1926 1927 void 1928 ip_send_dispatch(void *xmq) 1929 { 1930 ip_send_do_dispatch(xmq, 0); 1931 } 1932 1933 void 1934 ip_send(struct mbuf *m) 1935 { 1936 mq_enqueue(&ipsend_mq, m); 1937 task_add(net_tq(0), &ipsend_task); 1938 } 1939 1940 void 1941 ip_send_raw(struct mbuf *m) 1942 { 1943 
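	/*
	 * Like ip_send(), only enqueue here; ip_send_do_dispatch() later
	 * calls ip_output() with IP_RAWOUTPUT under NET_LOCK_SHARED().
	 */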
	mq_enqueue(&ipsendraw_mq, m);
	task_add(net_tq(0), &ipsendraw_task);
}