xref: /openbsd/sys/netinet/ip_input.c (revision 84b2c343)
1 /*	$OpenBSD: ip_input.c,v 1.395 2024/06/07 18:24:16 bluhm Exp $	*/
2 /*	$NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
33  */
34 
35 #include "pf.h"
36 #include "carp.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/domain.h>
42 #include <sys/mutex.h>
43 #include <sys/protosw.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/sysctl.h>
47 #include <sys/pool.h>
48 #include <sys/task.h>
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_dl.h>
53 #include <net/route.h>
54 #include <net/netisr.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/in_pcb.h>
61 #include <netinet/in_var.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/ip_icmp.h>
64 #include <net/if_types.h>
65 
66 #ifdef INET6
67 #include <netinet6/ip6_var.h>
68 #endif
69 
70 #if NPF > 0
71 #include <net/pfvar.h>
72 #endif
73 
74 #ifdef MROUTING
75 #include <netinet/ip_mroute.h>
76 #endif
77 
78 #ifdef IPSEC
79 #include <netinet/ip_ipsp.h>
80 #endif /* IPSEC */
81 
82 #if NCARP > 0
83 #include <netinet/ip_carp.h>
84 #endif
85 
86 /* values controllable via sysctl */
87 int	ip_forwarding = 0;
88 int	ipmforwarding = 0;
89 int	ipmultipath = 0;
90 int	ip_sendredirects = 1;
91 int	ip_dosourceroute = 0;
92 int	ip_defttl = IPDEFTTL;
93 int	ip_mtudisc = 1;
94 int	ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
95 int	ip_directedbcast = 0;
96 
97 /* Protects `ipq' and `ip_frags'. */
98 struct mutex	ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);
99 
100 /* IP reassembly queue */
101 LIST_HEAD(, ipq) ipq;
102 
103 /* Keep track of memory used for reassembly */
104 int	ip_maxqueue = 300;
105 int	ip_frags = 0;
106 
107 const struct sysctl_bounded_args ipctl_vars[] = {
108 #ifdef MROUTING
109 	{ IPCTL_MRTPROTO, &ip_mrtproto, SYSCTL_INT_READONLY },
110 #endif
111 	{ IPCTL_FORWARDING, &ip_forwarding, 0, 2 },
112 	{ IPCTL_SENDREDIRECTS, &ip_sendredirects, 0, 1 },
113 	{ IPCTL_DEFTTL, &ip_defttl, 0, 255 },
114 	{ IPCTL_DIRECTEDBCAST, &ip_directedbcast, 0, 1 },
115 	{ IPCTL_IPPORT_FIRSTAUTO, &ipport_firstauto, 0, 65535 },
116 	{ IPCTL_IPPORT_LASTAUTO, &ipport_lastauto, 0, 65535 },
117 	{ IPCTL_IPPORT_HIFIRSTAUTO, &ipport_hifirstauto, 0, 65535 },
118 	{ IPCTL_IPPORT_HILASTAUTO, &ipport_hilastauto, 0, 65535 },
119 	{ IPCTL_IPPORT_MAXQUEUE, &ip_maxqueue, 0, 10000 },
120 	{ IPCTL_MFORWARDING, &ipmforwarding, 0, 1 },
121 	{ IPCTL_ARPTIMEOUT, &arpt_keep, 0, INT_MAX },
122 	{ IPCTL_ARPDOWN, &arpt_down, 0, INT_MAX },
123 };
124 
125 struct niqueue ipintrq = NIQUEUE_INITIALIZER(IPQ_MAXLEN, NETISR_IP);
126 
127 struct pool ipqent_pool;
128 struct pool ipq_pool;
129 
130 struct cpumem *ipcounters;
131 
132 int ip_sysctl_ipstat(void *, size_t *, void *);
133 
134 static struct mbuf_queue	ipsend_mq;
135 static struct mbuf_queue	ipsendraw_mq;
136 
137 extern struct niqueue		arpinq;
138 
139 int	ip_ours(struct mbuf **, int *, int, int);
140 int	ip_dooptions(struct mbuf *, struct ifnet *, int);
141 int	in_ouraddr(struct mbuf *, struct ifnet *, struct route *, int);
142 
143 int		ip_fragcheck(struct mbuf **, int *);
144 struct mbuf *	ip_reass(struct ipqent *, struct ipq *);
145 void		ip_freef(struct ipq *);
146 void		ip_flush(void);
147 
148 static void ip_send_dispatch(void *);
149 static void ip_sendraw_dispatch(void *);
150 static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);
151 static struct task ipsendraw_task =
152 	TASK_INITIALIZER(ip_sendraw_dispatch, &ipsendraw_mq);
153 
154 /*
155  * Used to save the IP options in case a protocol wants to respond
156  * to an incoming packet over the same route if the packet got here
157  * using IP source routing.  This allows connection establishment and
158  * maintenance when the remote end is on a network that is not known
159  * to us.
160  */
161 struct ip_srcrt {
162 	int		isr_nhops;		   /* number of hops */
163 	struct in_addr	isr_dst;		   /* final destination */
164 	char		isr_nop;		   /* one NOP to align */
165 	char		isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
166 	struct in_addr	isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
167 };
168 
169 void save_rte(struct mbuf *, u_char *, struct in_addr);
170 
171 /*
172  * IP initialization: fill in IP protocol switch table.
173  * All protocols not implemented in kernel go to raw IP protocol handler.
174  */
175 void
176 ip_init(void)
177 {
178 	const struct protosw *pr;
179 	int i;
180 	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
181 	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
182 	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
183 	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;
184 
185 	ipcounters = counters_alloc(ips_ncounters);
186 
187 	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
188 	    IPL_SOFTNET, 0, "ipqe",  NULL);
189 	pool_init(&ipq_pool, sizeof(struct ipq), 0,
190 	    IPL_SOFTNET, 0, "ipq", NULL);
191 
192 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
193 	if (pr == NULL)
194 		panic("ip_init");
195 	for (i = 0; i < IPPROTO_MAX; i++)
196 		ip_protox[i] = pr - inetsw;
197 	for (pr = inetdomain.dom_protosw;
198 	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
199 		if (pr->pr_domain->dom_family == PF_INET &&
200 		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
201 		    pr->pr_protocol < IPPROTO_MAX)
202 			ip_protox[pr->pr_protocol] = pr - inetsw;
203 	LIST_INIT(&ipq);
204 
205 	/* Fill in list of ports not to allocate dynamically. */
206 	memset(&baddynamicports, 0, sizeof(baddynamicports));
207 	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
208 		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
209 	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
210 		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);
211 
212 	/* Fill in list of ports only root can bind to. */
213 	memset(&rootonlyports, 0, sizeof(rootonlyports));
214 	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
215 		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
216 	for (i = 0; defrootonlyports_udp[i] != 0; i++)
217 		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);
218 
219 	mq_init(&ipsend_mq, 64, IPL_SOFTNET);
220 	mq_init(&ipsendraw_mq, 64, IPL_SOFTNET);
221 
222 	arpinit();
223 #ifdef IPSEC
224 	ipsec_init();
225 #endif
226 #ifdef MROUTING
227 	rt_timer_queue_init(&ip_mrouterq, MCAST_EXPIRE_FREQUENCY,
228 	    &mfc_expire_route);
229 #endif
230 }
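/*
 * Editor's note (example, not part of the original file): after the protosw
 * scan above, ip_protox[IPPROTO_TCP] indexes the TCP entry of inetsw[],
 * while a protocol number without a kernel handler (say 253) still points
 * at the raw IP entry filled in by the first loop, so ip_deliver() always
 * finds a valid pr_input to call.
 */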
231 
232 /*
233  * Enqueue packet for local delivery.  Queuing is used as a boundary
234  * between the network layer (input/forward path) running with
235  * NET_LOCK_SHARED() and the transport layer needing it exclusively.
236  */
237 int
238 ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
239 {
240 	nxt = ip_fragcheck(mp, offp);
241 	if (nxt == IPPROTO_DONE)
242 		return IPPROTO_DONE;
243 
244 	/* We are already in an IPv4/IPv6 local delivery loop. */
245 	if (af != AF_UNSPEC)
246 		return nxt;
247 
248 	nxt = ip_deliver(mp, offp, nxt, AF_INET, 1);
249 	if (nxt == IPPROTO_DONE)
250 		return IPPROTO_DONE;
251 
252 	/* save values for later, use after dequeue */
253 	if (*offp != sizeof(struct ip)) {
254 		struct m_tag *mtag;
255 		struct ipoffnxt *ion;
256 
257 		/* mbuf tags are expensive, but only used for header options */
258 		mtag = m_tag_get(PACKET_TAG_IP_OFFNXT, sizeof(*ion),
259 		    M_NOWAIT);
260 		if (mtag == NULL) {
261 			ipstat_inc(ips_idropped);
262 			m_freemp(mp);
263 			return IPPROTO_DONE;
264 		}
265 		ion = (struct ipoffnxt *)(mtag + 1);
266 		ion->ion_off = *offp;
267 		ion->ion_nxt = nxt;
268 
269 		m_tag_prepend(*mp, mtag);
270 	}
271 
272 	niq_enqueue(&ipintrq, *mp);
273 	*mp = NULL;
274 	return IPPROTO_DONE;
275 }
276 
277 /*
278  * Dequeue and process locally delivered packets.
279  * This is called with exclusive NET_LOCK().
280  */
281 void
282 ipintr(void)
283 {
284 	struct mbuf *m;
285 
286 	while ((m = niq_dequeue(&ipintrq)) != NULL) {
287 		struct m_tag *mtag;
288 		int off, nxt;
289 
290 #ifdef DIAGNOSTIC
291 		if ((m->m_flags & M_PKTHDR) == 0)
292 			panic("ipintr no HDR");
293 #endif
294 		mtag = m_tag_find(m, PACKET_TAG_IP_OFFNXT, NULL);
295 		if (mtag != NULL) {
296 			struct ipoffnxt *ion;
297 
298 			ion = (struct ipoffnxt *)(mtag + 1);
299 			off = ion->ion_off;
300 			nxt = ion->ion_nxt;
301 
302 			m_tag_delete(m, mtag);
303 		} else {
304 			struct ip *ip;
305 
306 			ip = mtod(m, struct ip *);
307 			off = ip->ip_hl << 2;
308 			nxt = ip->ip_p;
309 		}
310 
311 		nxt = ip_deliver(&m, &off, nxt, AF_INET, 0);
312 		KASSERT(nxt == IPPROTO_DONE);
313 	}
314 }
315 
316 /*
317  * IPv4 input routine.
318  *
319  * Checksum and byte swap header.  Process options. Forward or deliver.
320  */
321 void
322 ipv4_input(struct ifnet *ifp, struct mbuf *m)
323 {
324 	int off, nxt;
325 
326 	off = 0;
327 	nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
328 	KASSERT(nxt == IPPROTO_DONE);
329 }
330 
331 struct mbuf *
332 ipv4_check(struct ifnet *ifp, struct mbuf *m)
333 {
334 	struct ip *ip;
335 	int hlen, len;
336 
337 	if (m->m_len < sizeof(*ip)) {
338 		m = m_pullup(m, sizeof(*ip));
339 		if (m == NULL) {
340 			ipstat_inc(ips_toosmall);
341 			return (NULL);
342 		}
343 	}
344 
345 	ip = mtod(m, struct ip *);
346 	if (ip->ip_v != IPVERSION) {
347 		ipstat_inc(ips_badvers);
348 		goto bad;
349 	}
350 
351 	hlen = ip->ip_hl << 2;
352 	if (hlen < sizeof(*ip)) {	/* minimum header length */
353 		ipstat_inc(ips_badhlen);
354 		goto bad;
355 	}
356 	if (hlen > m->m_len) {
357 		m = m_pullup(m, hlen);
358 		if (m == NULL) {
359 			ipstat_inc(ips_badhlen);
360 			return (NULL);
361 		}
362 		ip = mtod(m, struct ip *);
363 	}
364 
365 	/* 127/8 must not appear on wire - RFC1122 */
366 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
367 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
368 		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
369 			ipstat_inc(ips_badaddr);
370 			goto bad;
371 		}
372 	}
373 
374 	if (!ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK)) {
375 		if (ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_BAD)) {
376 			ipstat_inc(ips_badsum);
377 			goto bad;
378 		}
379 
380 		ipstat_inc(ips_inswcsum);
381 		if (in_cksum(m, hlen) != 0) {
382 			ipstat_inc(ips_badsum);
383 			goto bad;
384 		}
385 
386 		SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK);
387 	}
388 
389 	/* Retrieve the packet length. */
390 	len = ntohs(ip->ip_len);
391 
392 	/*
393 	 * Convert fields to host representation.
394 	 */
395 	if (len < hlen) {
396 		ipstat_inc(ips_badlen);
397 		goto bad;
398 	}
399 
400 	/*
401 	 * Check that the amount of data in the buffers
402 	 * is at least as much as the IP header would have us expect.
403 	 * Trim mbufs if longer than we expect.
404 	 * Drop packet if shorter than we expect.
405 	 */
406 	if (m->m_pkthdr.len < len) {
407 		ipstat_inc(ips_tooshort);
408 		goto bad;
409 	}
410 	if (m->m_pkthdr.len > len) {
411 		if (m->m_len == m->m_pkthdr.len) {
412 			m->m_len = len;
413 			m->m_pkthdr.len = len;
414 		} else
415 			m_adj(m, len - m->m_pkthdr.len);
416 	}
417 
418 	return (m);
419 bad:
420 	m_freem(m);
421 	return (NULL);
422 }
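/*
 * Editor's sketch (not part of the original file): roughly what the
 * software in_cksum(m, hlen) verification above amounts to once the header
 * is contiguous in a single buffer.  The real in_cksum() walks the mbuf
 * chain; the hypothetical helper below assumes an aligned header whose
 * length is a multiple of 4, which ipv4_check() guarantees.  A header with
 * a correct checksum field sums to 0xffff, so the complement is 0.
 */
#if 0
static u_int16_t
ip_hdr_cksum_sketch(const void *hdr, int hlen)
{
	const u_int16_t *w = hdr;
	u_int32_t sum = 0;

	while (hlen > 1) {
		sum += *w++;
		hlen -= 2;
	}
	sum = (sum >> 16) + (sum & 0xffff);	/* fold carry bits */
	sum += (sum >> 16);
	return (u_int16_t)~sum;			/* 0 means the header is good */
}
#endif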
423 
424 int
425 ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
426 {
427 	struct route ro;
428 	struct mbuf *m;
429 	struct ip *ip;
430 	int hlen;
431 #if NPF > 0
432 	struct in_addr odst;
433 #endif
434 	int flags = 0;
435 
436 	KASSERT(*offp == 0);
437 
438 	ro.ro_rt = NULL;
439 	ipstat_inc(ips_total);
440 	m = *mp = ipv4_check(ifp, *mp);
441 	if (m == NULL)
442 		goto bad;
443 
444 	ip = mtod(m, struct ip *);
445 
446 #if NCARP > 0
447 	if (carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
448 	    &ip->ip_dst.s_addr, (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
449 		goto bad;
450 #endif
451 
452 #if NPF > 0
453 	/*
454 	 * Packet filter
455 	 */
456 	odst = ip->ip_dst;
457 	if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
458 		goto bad;
459 	m = *mp;
460 	if (m == NULL)
461 		goto bad;
462 
463 	ip = mtod(m, struct ip *);
464 	if (odst.s_addr != ip->ip_dst.s_addr)
465 		SET(flags, IP_REDIRECT);
466 #endif
467 
468 	if (ip_forwarding != 0)
469 		SET(flags, IP_FORWARDING);
470 	if (ip_directedbcast)
471 		SET(flags, IP_ALLOWBROADCAST);
472 
473 	hlen = ip->ip_hl << 2;
474 
475 	/*
476 	 * Process options and, if not destined for us,
477 	 * ship it on.  ip_dooptions returns 1 when an
478 	 * error was detected (causing an icmp message
479 	 * to be sent and the original packet to be freed).
480 	 */
481 	if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp, flags)) {
482 		m = *mp = NULL;
483 		goto bad;
484 	}
485 
486 	if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
487 	    ip->ip_dst.s_addr == INADDR_ANY) {
488 		nxt = ip_ours(mp, offp, nxt, af);
489 		goto out;
490 	}
491 
492 	switch(in_ouraddr(m, ifp, &ro, flags)) {
493 	case 2:
494 		goto bad;
495 	case 1:
496 		nxt = ip_ours(mp, offp, nxt, af);
497 		goto out;
498 	}
499 
500 	if (IN_MULTICAST(ip->ip_dst.s_addr)) {
501 		/*
502 		 * Make sure M_MCAST is set.  It should theoretically
503 		 * already be there, but let's play safe because upper
504 		 * layers check for this flag.
505 		 */
506 		m->m_flags |= M_MCAST;
507 
508 #ifdef MROUTING
509 		if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
510 			int error;
511 
512 			if (m->m_flags & M_EXT) {
513 				if ((m = *mp = m_pullup(m, hlen)) == NULL) {
514 					ipstat_inc(ips_toosmall);
515 					goto bad;
516 				}
517 				ip = mtod(m, struct ip *);
518 			}
519 			/*
520 			 * If we are acting as a multicast router, all
521 			 * incoming multicast packets are passed to the
522 			 * kernel-level multicast forwarding function.
523 			 * The packet is returned (relatively) intact; if
524 			 * ip_mforward() returns a non-zero value, the packet
525 			 * must be discarded, else it may be accepted below.
526 			 *
527 			 * (The IP ident field is put in the same byte order
528 			 * as expected when ip_mforward() is called from
529 			 * ip_output().)
530 			 */
531 			KERNEL_LOCK();
532 			error = ip_mforward(m, ifp);
533 			KERNEL_UNLOCK();
534 			if (error) {
535 				ipstat_inc(ips_cantforward);
536 				goto bad;
537 			}
538 
539 			/*
540 			 * The process-level routing daemon needs to receive
541 			 * all multicast IGMP packets, whether or not this
542 			 * host belongs to their destination groups.
543 			 */
544 			if (ip->ip_p == IPPROTO_IGMP) {
545 				nxt = ip_ours(mp, offp, nxt, af);
546 				goto out;
547 			}
548 			ipstat_inc(ips_forward);
549 		}
550 #endif
551 		/*
552 		 * See if we belong to the destination multicast group on the
553 		 * arrival interface.
554 		 */
555 		if (!in_hasmulti(&ip->ip_dst, ifp)) {
556 			ipstat_inc(ips_notmember);
557 			if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
558 				ipstat_inc(ips_cantforward);
559 			goto bad;
560 		}
561 		nxt = ip_ours(mp, offp, nxt, af);
562 		goto out;
563 	}
564 
565 #if NCARP > 0
566 	if (ip->ip_p == IPPROTO_ICMP &&
567 	    carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
568 	    &ip->ip_dst.s_addr, 1))
569 		goto bad;
570 #endif
571 	/*
572 	 * Not for us; forward if possible and desirable.
573 	 */
574 	if (!ISSET(flags, IP_FORWARDING)) {
575 		ipstat_inc(ips_cantforward);
576 		goto bad;
577 	}
578 #ifdef IPSEC
579 	if (ipsec_in_use) {
580 		int rv;
581 
582 		rv = ipsec_forward_check(m, hlen, AF_INET);
583 		if (rv != 0) {
584 			ipstat_inc(ips_cantforward);
585 			goto bad;
586 		}
587 		/*
588 		 * Fall through, forward packet. Outbound IPsec policy
589 		 * checking will occur in ip_output().
590 		 */
591 	}
592 #endif /* IPSEC */
593 
594 	ip_forward(m, ifp, &ro, flags);
595 	*mp = NULL;
596 	rtfree(ro.ro_rt);
597 	return IPPROTO_DONE;
598  bad:
599 	nxt = IPPROTO_DONE;
600 	m_freemp(mp);
601  out:
602 	rtfree(ro.ro_rt);
603 	return nxt;
604 }
605 
606 int
607 ip_fragcheck(struct mbuf **mp, int *offp)
608 {
609 	struct ip *ip;
610 	struct ipq *fp;
611 	struct ipqent *ipqe;
612 	int hlen;
613 	uint16_t mff;
614 
615 	ip = mtod(*mp, struct ip *);
616 	hlen = ip->ip_hl << 2;
617 
618 	/*
619 	 * If offset or more fragments are set, must reassemble.
620 	 * Otherwise, nothing need be done.
621 	 * (We could look in the reassembly queue to see
622 	 * if the packet was previously fragmented,
623 	 * but it's not worth the time; just let them time out.)
624 	 */
625 	if (ISSET(ip->ip_off, htons(IP_OFFMASK | IP_MF))) {
626 		if ((*mp)->m_flags & M_EXT) {		/* XXX */
627 			if ((*mp = m_pullup(*mp, hlen)) == NULL) {
628 				ipstat_inc(ips_toosmall);
629 				return IPPROTO_DONE;
630 			}
631 			ip = mtod(*mp, struct ip *);
632 		}
633 
634 		/*
635 		 * Adjust ip_len to not reflect header,
636 		 * set ipqe_mff if more fragments are expected,
637 		 * convert offset of this to bytes.
638 		 */
639 		ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
640 		mff = ISSET(ip->ip_off, htons(IP_MF));
641 		if (mff) {
642 			/*
643 			 * Make sure that fragments have a data length
644 			 * that's a non-zero multiple of 8 bytes.
645 			 */
646 			if (ntohs(ip->ip_len) == 0 ||
647 			    (ntohs(ip->ip_len) & 0x7) != 0) {
648 				ipstat_inc(ips_badfrags);
649 				m_freemp(mp);
650 				return IPPROTO_DONE;
651 			}
652 		}
653 		ip->ip_off = htons(ntohs(ip->ip_off) << 3);
654 
655 		mtx_enter(&ipq_mutex);
656 
657 		/*
658 		 * Look for queue of fragments
659 		 * of this datagram.
660 		 */
661 		LIST_FOREACH(fp, &ipq, ipq_q) {
662 			if (ip->ip_id == fp->ipq_id &&
663 			    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
664 			    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
665 			    ip->ip_p == fp->ipq_p)
666 				break;
667 		}
668 
669 		/*
670 		 * If datagram marked as having more fragments
671 		 * or if this is not the first fragment,
672 		 * attempt reassembly; if it succeeds, proceed.
673 		 */
674 		if (mff || ip->ip_off) {
675 			ipstat_inc(ips_fragments);
676 			if (ip_frags + 1 > ip_maxqueue) {
677 				ip_flush();
678 				ipstat_inc(ips_rcvmemdrop);
679 				goto bad;
680 			}
681 
682 			ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
683 			if (ipqe == NULL) {
684 				ipstat_inc(ips_rcvmemdrop);
685 				goto bad;
686 			}
687 			ip_frags++;
688 			ipqe->ipqe_mff = mff;
689 			ipqe->ipqe_m = *mp;
690 			ipqe->ipqe_ip = ip;
691 			*mp = ip_reass(ipqe, fp);
692 			if (*mp == NULL)
693 				goto bad;
694 			ipstat_inc(ips_reassembled);
695 			ip = mtod(*mp, struct ip *);
696 			hlen = ip->ip_hl << 2;
697 			ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
698 		} else {
699 			if (fp != NULL)
700 				ip_freef(fp);
701 		}
702 
703 		mtx_leave(&ipq_mutex);
704 	}
705 
706 	*offp = hlen;
707 	return ip->ip_p;
708 
709  bad:
710 	mtx_leave(&ipq_mutex);
711 	m_freemp(mp);
712 	return IPPROTO_DONE;
713 }
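/*
 * Editor's note (worked example, not part of the original file): for a
 * middle fragment of a datagram split into 1480-byte pieces over a
 * 1500-byte MTU, the wire header has ip_hl = 5, ip_len = 1500 and, for the
 * second piece, a fragment offset of 185 (8-byte units) with MF set.
 * After the adjustments in ip_fragcheck() above, ip_len holds 1480
 * (payload only) and ip_off holds 1480 (bytes), which is the
 * representation ip_reass() works with.
 */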
714 
715 #ifndef INET6
716 #define IPSTAT_INC(name)	ipstat_inc(ips_##name)
717 #else
718 #define IPSTAT_INC(name)	(af == AF_INET ?	\
719     ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
720 #endif
721 
722 int
723 ip_deliver(struct mbuf **mp, int *offp, int nxt, int af, int shared)
724 {
725 #ifdef INET6
726 	int nest = 0;
727 #endif
728 
729 	/*
730 	 * Tell launch routine the next header
731 	 */
732 	IPSTAT_INC(delivered);
733 
734 	while (nxt != IPPROTO_DONE) {
735 		const struct protosw *psw;
736 		int naf;
737 
738 		switch (af) {
739 		case AF_INET:
740 			psw = &inetsw[ip_protox[nxt]];
741 			break;
742 #ifdef INET6
743 		case AF_INET6:
744 			psw = &inet6sw[ip6_protox[nxt]];
745 			break;
746 #endif
747 		}
748 		if (shared && !ISSET(psw->pr_flags, PR_MPINPUT)) {
749 			/* delivery not finished, decrement counter, queue */
750 			switch (af) {
751 			case AF_INET:
752 				counters_dec(ipcounters, ips_delivered);
753 				break;
754 #ifdef INET6
755 			case AF_INET6:
756 				counters_dec(ip6counters, ip6s_delivered);
757 				break;
758 #endif
759 			}
760 			break;
761 		}
762 
763 #ifdef INET6
764 		if (af == AF_INET6 &&
765 		    ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
766 			ip6stat_inc(ip6s_toomanyhdr);
767 			goto bad;
768 		}
769 #endif
770 
771 		/*
772 		 * protection against faulty packet - there should be
773 		 * more sanity checks in header chain processing.
774 		 */
775 		if ((*mp)->m_pkthdr.len < *offp) {
776 			IPSTAT_INC(tooshort);
777 			goto bad;
778 		}
779 
780 #ifdef IPSEC
781 		if (ipsec_in_use) {
782 			if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
783 				IPSTAT_INC(cantforward);
784 				goto bad;
785 			}
786 		}
787 		/* Otherwise, just fall through and deliver the packet */
788 #endif
789 
790 		switch (nxt) {
791 		case IPPROTO_IPV4:
792 			naf = AF_INET;
793 			ipstat_inc(ips_delivered);
794 			break;
795 #ifdef INET6
796 		case IPPROTO_IPV6:
797 			naf = AF_INET6;
798 			ip6stat_inc(ip6s_delivered);
799 			break;
800 #endif
801 		default:
802 			naf = af;
803 			break;
804 		}
805 		nxt = (*psw->pr_input)(mp, offp, nxt, af);
806 		af = naf;
807 	}
808 	return nxt;
809  bad:
810 	m_freemp(mp);
811 	return IPPROTO_DONE;
812 }
813 #undef IPSTAT_INC
814 
815 int
816 in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct route *ro, int flags)
817 {
818 	struct rtentry		*rt;
819 	struct ip		*ip;
820 	int			 match = 0;
821 
822 #if NPF > 0
823 	switch (pf_ouraddr(m)) {
824 	case 0:
825 		return (0);
826 	case 1:
827 		return (1);
828 	default:
829 		/* pf does not know it */
830 		break;
831 	}
832 #endif
833 
834 	ip = mtod(m, struct ip *);
835 
836 	rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, m->m_pkthdr.ph_rtableid);
837 	if (rt != NULL) {
838 		if (ISSET(rt->rt_flags, RTF_LOCAL))
839 			match = 1;
840 
841 		/*
842 		 * If directedbcast is enabled we only consider it local
843 		 * if it is received on the interface with that address.
844 		 */
845 		if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
846 		    (!ISSET(flags, IP_ALLOWBROADCAST) ||
847 		    rt->rt_ifidx == ifp->if_index)) {
848 			match = 1;
849 
850 			/* Make sure M_BCAST is set */
851 			m->m_flags |= M_BCAST;
852 		}
853 	}
854 
855 	if (!match) {
856 		struct ifaddr *ifa;
857 
858 		/*
859 		 * No local address or broadcast address found, so check for
860 		 * ancient classful broadcast addresses.
861 		 * It must have been broadcast on the link layer, and for an
862 		 * address on the interface it was received on.
863 		 */
864 		if (!ISSET(m->m_flags, M_BCAST) ||
865 		    !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
866 			return (0);
867 
868 		if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
869 			return (0);
870 		/*
871 		 * The check in the loop assumes you only rx a packet on an UP
872 		 * interface, and that M_BCAST will only be set on a BROADCAST
873 		 * interface.
874 		 */
875 		NET_ASSERT_LOCKED();
876 		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
877 			if (ifa->ifa_addr->sa_family != AF_INET)
878 				continue;
879 
880 			if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
881 			    ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
882 				match = 1;
883 				break;
884 			}
885 		}
886 	} else if (!ISSET(flags, IP_FORWARDING) &&
887 	    rt->rt_ifidx != ifp->if_index &&
888 	    !((ifp->if_flags & IFF_LOOPBACK) || (ifp->if_type == IFT_ENC) ||
889 	    (m->m_pkthdr.pf.flags & PF_TAG_TRANSLATE_LOCALHOST))) {
890 		/* received on wrong interface. */
891 #if NCARP > 0
892 		struct ifnet *out_if;
893 
894 		/*
895 		 * Virtual IPs on carp interfaces need to be checked also
896 		 * against the parent interface and other carp interfaces
897 		 * sharing the same parent.
898 		 */
899 		out_if = if_get(rt->rt_ifidx);
900 		if (!(out_if && carp_strict_addr_chk(out_if, ifp))) {
901 			ipstat_inc(ips_wrongif);
902 			match = 2;
903 		}
904 		if_put(out_if);
905 #else
906 		ipstat_inc(ips_wrongif);
907 		match = 2;
908 #endif
909 	}
910 
911 	return (match);
912 }
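/*
 * Editor's note (example, not part of the original file): with
 * ip_directedbcast left at 0, IP_ALLOWBROADCAST is never set, so a packet
 * sent to the broadcast address of any locally attached subnet matches the
 * RTF_BROADCAST case above and is treated as ours regardless of the
 * arrival interface.  With directed broadcasts enabled it is only ours
 * when received on the interface owning that address; otherwise it may be
 * forwarded towards the destination network.
 */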
913 
914 /*
915  * Take incoming datagram fragment and try to
916  * reassemble it into whole datagram.  If a chain for
917  * reassembly of this datagram already exists, then it
918  * is given as fp; otherwise have to make a chain.
919  */
920 struct mbuf *
921 ip_reass(struct ipqent *ipqe, struct ipq *fp)
922 {
923 	struct mbuf *m = ipqe->ipqe_m;
924 	struct ipqent *nq, *p, *q;
925 	struct ip *ip;
926 	struct mbuf *t;
927 	int hlen = ipqe->ipqe_ip->ip_hl << 2;
928 	int i, next;
929 	u_int8_t ecn, ecn0;
930 
931 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
932 
933 	/*
934 	 * Presence of header sizes in mbufs
935 	 * would confuse code below.
936 	 */
937 	m->m_data += hlen;
938 	m->m_len -= hlen;
939 
940 	/*
941 	 * If first fragment to arrive, create a reassembly queue.
942 	 */
943 	if (fp == NULL) {
944 		fp = pool_get(&ipq_pool, PR_NOWAIT);
945 		if (fp == NULL)
946 			goto dropfrag;
947 		LIST_INSERT_HEAD(&ipq, fp, ipq_q);
948 		fp->ipq_ttl = IPFRAGTTL;
949 		fp->ipq_p = ipqe->ipqe_ip->ip_p;
950 		fp->ipq_id = ipqe->ipqe_ip->ip_id;
951 		LIST_INIT(&fp->ipq_fragq);
952 		fp->ipq_src = ipqe->ipqe_ip->ip_src;
953 		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
954 		p = NULL;
955 		goto insert;
956 	}
957 
958 	/*
959 	 * Handle ECN by comparing this segment with the first one;
960 	 * if CE is set, do not lose CE.
961 	 * drop if CE and not-ECT are mixed for the same packet.
962 	 */
963 	ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
964 	ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
965 	if (ecn == IPTOS_ECN_CE) {
966 		if (ecn0 == IPTOS_ECN_NOTECT)
967 			goto dropfrag;
968 		if (ecn0 != IPTOS_ECN_CE)
969 			LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
970 			    IPTOS_ECN_CE;
971 	}
972 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
973 		goto dropfrag;
974 
975 	/*
976 	 * Find a segment which begins after this one does.
977 	 */
978 	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
979 	    p = q, q = LIST_NEXT(q, ipqe_q))
980 		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
981 			break;
982 
983 	/*
984 	 * If there is a preceding segment, it may provide some of
985 	 * our data already.  If so, drop the data from the incoming
986 	 * segment.  If it provides all of our data, drop us.
987 	 */
988 	if (p != NULL) {
989 		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
990 		    ntohs(ipqe->ipqe_ip->ip_off);
991 		if (i > 0) {
992 			if (i >= ntohs(ipqe->ipqe_ip->ip_len))
993 				goto dropfrag;
994 			m_adj(ipqe->ipqe_m, i);
995 			ipqe->ipqe_ip->ip_off =
996 			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
997 			ipqe->ipqe_ip->ip_len =
998 			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
999 		}
1000 	}
1001 
1002 	/*
1003 	 * While we overlap succeeding segments trim them or,
1004 	 * if they are completely covered, dequeue them.
1005 	 */
1006 	for (; q != NULL &&
1007 	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
1008 	    ntohs(q->ipqe_ip->ip_off); q = nq) {
1009 		i = (ntohs(ipqe->ipqe_ip->ip_off) +
1010 		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
1011 		if (i < ntohs(q->ipqe_ip->ip_len)) {
1012 			q->ipqe_ip->ip_len =
1013 			    htons(ntohs(q->ipqe_ip->ip_len) - i);
1014 			q->ipqe_ip->ip_off =
1015 			    htons(ntohs(q->ipqe_ip->ip_off) + i);
1016 			m_adj(q->ipqe_m, i);
1017 			break;
1018 		}
1019 		nq = LIST_NEXT(q, ipqe_q);
1020 		m_freem(q->ipqe_m);
1021 		LIST_REMOVE(q, ipqe_q);
1022 		pool_put(&ipqent_pool, q);
1023 		ip_frags--;
1024 	}
1025 
1026 insert:
1027 	/*
1028 	 * Stick new segment in its place;
1029 	 * check for complete reassembly.
1030 	 */
1031 	if (p == NULL) {
1032 		LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
1033 	} else {
1034 		LIST_INSERT_AFTER(p, ipqe, ipqe_q);
1035 	}
1036 	next = 0;
1037 	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
1038 	    p = q, q = LIST_NEXT(q, ipqe_q)) {
1039 		if (ntohs(q->ipqe_ip->ip_off) != next)
1040 			return (0);
1041 		next += ntohs(q->ipqe_ip->ip_len);
1042 	}
1043 	if (p->ipqe_mff)
1044 		return (0);
1045 
1046 	/*
1047 	 * Reassembly is complete.  Check for a bogus message size and
1048 	 * concatenate fragments.
1049 	 */
1050 	q = LIST_FIRST(&fp->ipq_fragq);
1051 	ip = q->ipqe_ip;
1052 	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
1053 		ipstat_inc(ips_toolong);
1054 		ip_freef(fp);
1055 		return (0);
1056 	}
1057 	m = q->ipqe_m;
1058 	t = m->m_next;
1059 	m->m_next = 0;
1060 	m_cat(m, t);
1061 	nq = LIST_NEXT(q, ipqe_q);
1062 	pool_put(&ipqent_pool, q);
1063 	ip_frags--;
1064 	for (q = nq; q != NULL; q = nq) {
1065 		t = q->ipqe_m;
1066 		nq = LIST_NEXT(q, ipqe_q);
1067 		pool_put(&ipqent_pool, q);
1068 		ip_frags--;
1069 		m_removehdr(t);
1070 		m_cat(m, t);
1071 	}
1072 
1073 	/*
1074 	 * Create header for new ip packet by
1075 	 * modifying header of first packet;
1076 	 * dequeue and discard fragment reassembly header.
1077 	 * Make header visible.
1078 	 */
1079 	ip->ip_len = htons(next);
1080 	ip->ip_src = fp->ipq_src;
1081 	ip->ip_dst = fp->ipq_dst;
1082 	LIST_REMOVE(fp, ipq_q);
1083 	pool_put(&ipq_pool, fp);
1084 	m->m_len += (ip->ip_hl << 2);
1085 	m->m_data -= (ip->ip_hl << 2);
1086 	m_calchdrlen(m);
1087 	return (m);
1088 
1089 dropfrag:
1090 	ipstat_inc(ips_fragdropped);
1091 	m_freem(m);
1092 	pool_put(&ipqent_pool, ipqe);
1093 	ip_frags--;
1094 	return (NULL);
1095 }
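/*
 * Editor's note (worked example, not part of the original file): if the
 * queue already holds a fragment covering bytes 0-1479 and a new fragment
 * claims bytes 1472-2951, the preceding-segment check above computes
 * i = (0 + 1480) - 1472 = 8, trims the first 8 bytes of the new fragment
 * with m_adj(), advances its offset to 1480 and shrinks its length to
 * 1472, keeping the chain free of overlaps.
 */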
1096 
1097 /*
1098  * Free a fragment reassembly header and all
1099  * associated datagrams.
1100  */
1101 void
1102 ip_freef(struct ipq *fp)
1103 {
1104 	struct ipqent *q;
1105 
1106 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
1107 
1108 	while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
1109 		LIST_REMOVE(q, ipqe_q);
1110 		m_freem(q->ipqe_m);
1111 		pool_put(&ipqent_pool, q);
1112 		ip_frags--;
1113 	}
1114 	LIST_REMOVE(fp, ipq_q);
1115 	pool_put(&ipq_pool, fp);
1116 }
1117 
1118 /*
1119  * IP timer processing;
1120  * if a timer expires on a reassembly queue, discard it.
1121  */
1122 void
1123 ip_slowtimo(void)
1124 {
1125 	struct ipq *fp, *nfp;
1126 
1127 	mtx_enter(&ipq_mutex);
1128 	LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
1129 		if (--fp->ipq_ttl == 0) {
1130 			ipstat_inc(ips_fragtimeout);
1131 			ip_freef(fp);
1132 		}
1133 	}
1134 	mtx_leave(&ipq_mutex);
1135 }
1136 
1137 /*
1138  * Flush a bunch of datagram fragments, till we are down to 75%.
1139  */
1140 void
1141 ip_flush(void)
1142 {
1143 	int max = 50;
1144 
1145 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
1146 
1147 	while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
1148 		ipstat_inc(ips_fragdropped);
1149 		ip_freef(LIST_FIRST(&ipq));
1150 	}
1151 }
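/*
 * Editor's note (worked example, not part of the original file): with the
 * default ip_maxqueue of 300, ip_flush() keeps dropping whole reassembly
 * queues until ip_frags is back at or below 300 * 3 / 4 = 225, bounded by
 * the local max counter so a single call cannot spin for long.
 */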
1152 
1153 /*
1154  * Do option processing on a datagram,
1155  * possibly discarding it if bad options are encountered,
1156  * or forwarding it if source-routed.
1157  * Returns 1 if packet has been forwarded/freed,
1158  * 0 if the packet should be processed further.
1159  */
1160 int
1161 ip_dooptions(struct mbuf *m, struct ifnet *ifp, int flags)
1162 {
1163 	struct ip *ip = mtod(m, struct ip *);
1164 	unsigned int rtableid = m->m_pkthdr.ph_rtableid;
1165 	struct rtentry *rt;
1166 	struct sockaddr_in ipaddr;
1167 	u_char *cp;
1168 	struct ip_timestamp ipt;
1169 	struct in_ifaddr *ia;
1170 	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
1171 	struct in_addr sin, dst;
1172 	u_int32_t ntime;
1173 
1174 	dst = ip->ip_dst;
1175 	cp = (u_char *)(ip + 1);
1176 	cnt = (ip->ip_hl << 2) - sizeof (struct ip);
1177 
1178 	KERNEL_LOCK();
1179 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
1180 		opt = cp[IPOPT_OPTVAL];
1181 		if (opt == IPOPT_EOL)
1182 			break;
1183 		if (opt == IPOPT_NOP)
1184 			optlen = 1;
1185 		else {
1186 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
1187 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1188 				goto bad;
1189 			}
1190 			optlen = cp[IPOPT_OLEN];
1191 			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
1192 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1193 				goto bad;
1194 			}
1195 		}
1196 
1197 		switch (opt) {
1198 
1199 		default:
1200 			break;
1201 
1202 		/*
1203 		 * Source routing with record.
1204 		 * Find interface with current destination address.
1205 		 * If none on this machine then drop if strictly routed,
1206 		 * or do nothing if loosely routed.
1207 		 * Record interface address and bring up next address
1208 		 * component.  If strictly routed make sure next
1209 		 * address is on directly accessible net.
1210 		 */
1211 		case IPOPT_LSRR:
1212 		case IPOPT_SSRR:
1213 			if (!ip_dosourceroute) {
1214 				type = ICMP_UNREACH;
1215 				code = ICMP_UNREACH_SRCFAIL;
1216 				goto bad;
1217 			}
1218 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1219 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1220 				goto bad;
1221 			}
1222 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1223 				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1224 				goto bad;
1225 			}
1226 			memset(&ipaddr, 0, sizeof(ipaddr));
1227 			ipaddr.sin_family = AF_INET;
1228 			ipaddr.sin_len = sizeof(ipaddr);
1229 			ipaddr.sin_addr = ip->ip_dst;
1230 			ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr),
1231 			    m->m_pkthdr.ph_rtableid));
1232 			if (ia == NULL) {
1233 				if (opt == IPOPT_SSRR) {
1234 					type = ICMP_UNREACH;
1235 					code = ICMP_UNREACH_SRCFAIL;
1236 					goto bad;
1237 				}
1238 				/*
1239 				 * Loose routing, and not at next destination
1240 				 * yet; nothing to do except forward.
1241 				 */
1242 				break;
1243 			}
1244 			off--;			/* 0 origin */
1245 			if ((off + sizeof(struct in_addr)) > optlen) {
1246 				/*
1247 				 * End of source route.  Should be for us.
1248 				 */
1249 				save_rte(m, cp, ip->ip_src);
1250 				break;
1251 			}
1252 
1253 			/*
1254 			 * locate outgoing interface
1255 			 */
1256 			memset(&ipaddr, 0, sizeof(ipaddr));
1257 			ipaddr.sin_family = AF_INET;
1258 			ipaddr.sin_len = sizeof(ipaddr);
1259 			memcpy(&ipaddr.sin_addr, cp + off,
1260 			    sizeof(ipaddr.sin_addr));
1261 			/* keep packet in the virtual instance */
1262 			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
1263 			if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) &&
1264 			    ISSET(rt->rt_flags, RTF_GATEWAY))) {
1265 				type = ICMP_UNREACH;
1266 				code = ICMP_UNREACH_SRCFAIL;
1267 				rtfree(rt);
1268 				goto bad;
1269 			}
1270 			ia = ifatoia(rt->rt_ifa);
1271 			memcpy(cp + off, &ia->ia_addr.sin_addr,
1272 			    sizeof(struct in_addr));
1273 			rtfree(rt);
1274 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1275 			ip->ip_dst = ipaddr.sin_addr;
1276 			/*
1277 			 * Let ip_intr's mcast routing check handle mcast pkts
1278 			 */
1279 			forward = !IN_MULTICAST(ip->ip_dst.s_addr);
1280 			break;
1281 
1282 		case IPOPT_RR:
1283 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1284 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1285 				goto bad;
1286 			}
1287 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1288 				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1289 				goto bad;
1290 			}
1291 
1292 			/*
1293 			 * If no space remains, ignore.
1294 			 */
1295 			off--;			/* 0 origin */
1296 			if ((off + sizeof(struct in_addr)) > optlen)
1297 				break;
1298 			memset(&ipaddr, 0, sizeof(ipaddr));
1299 			ipaddr.sin_family = AF_INET;
1300 			ipaddr.sin_len = sizeof(ipaddr);
1301 			ipaddr.sin_addr = ip->ip_dst;
1302 			/*
1303 			 * locate outgoing interface; if we're the destination,
1304 			 * use the incoming interface (should be same).
1305 			 * Again keep the packet inside the virtual instance.
1306 			 */
1307 			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
1308 			if (!rtisvalid(rt)) {
1309 				type = ICMP_UNREACH;
1310 				code = ICMP_UNREACH_HOST;
1311 				rtfree(rt);
1312 				goto bad;
1313 			}
1314 			ia = ifatoia(rt->rt_ifa);
1315 			memcpy(cp + off, &ia->ia_addr.sin_addr,
1316 			    sizeof(struct in_addr));
1317 			rtfree(rt);
1318 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1319 			break;
1320 
1321 		case IPOPT_TS:
1322 			code = cp - (u_char *)ip;
1323 			if (optlen < sizeof(struct ip_timestamp))
1324 				goto bad;
1325 			memcpy(&ipt, cp, sizeof(struct ip_timestamp));
1326 			if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5)
1327 				goto bad;
1328 			if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) {
1329 				if (++ipt.ipt_oflw == 0)
1330 					goto bad;
1331 				break;
1332 			}
1333 			memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin);
1334 			switch (ipt.ipt_flg) {
1335 
1336 			case IPOPT_TS_TSONLY:
1337 				break;
1338 
1339 			case IPOPT_TS_TSANDADDR:
1340 				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
1341 				    sizeof(struct in_addr) > ipt.ipt_len)
1342 					goto bad;
1343 				memset(&ipaddr, 0, sizeof(ipaddr));
1344 				ipaddr.sin_family = AF_INET;
1345 				ipaddr.sin_len = sizeof(ipaddr);
1346 				ipaddr.sin_addr = dst;
1347 				ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
1348 				    ifp));
1349 				if (ia == NULL)
1350 					continue;
1351 				memcpy(&sin, &ia->ia_addr.sin_addr,
1352 				    sizeof(struct in_addr));
1353 				ipt.ipt_ptr += sizeof(struct in_addr);
1354 				break;
1355 
1356 			case IPOPT_TS_PRESPEC:
1357 				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
1358 				    sizeof(struct in_addr) > ipt.ipt_len)
1359 					goto bad;
1360 				memset(&ipaddr, 0, sizeof(ipaddr));
1361 				ipaddr.sin_family = AF_INET;
1362 				ipaddr.sin_len = sizeof(ipaddr);
1363 				ipaddr.sin_addr = sin;
1364 				if (ifa_ifwithaddr(sintosa(&ipaddr),
1365 				    m->m_pkthdr.ph_rtableid) == NULL)
1366 					continue;
1367 				ipt.ipt_ptr += sizeof(struct in_addr);
1368 				break;
1369 
1370 			default:
1371 				/* XXX can't take &ipt->ipt_flg */
1372 				code = (u_char *)&ipt.ipt_ptr -
1373 				    (u_char *)ip + 1;
1374 				goto bad;
1375 			}
1376 			ntime = iptime();
1377 			memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t));
1378 			ipt.ipt_ptr += sizeof(u_int32_t);
1379 		}
1380 	}
1381 	KERNEL_UNLOCK();
1382 	if (forward && ISSET(flags, IP_FORWARDING)) {
1383 		ip_forward(m, ifp, NULL, flags | IP_REDIRECT);
1384 		return (1);
1385 	}
1386 	return (0);
1387 bad:
1388 	KERNEL_UNLOCK();
1389 	icmp_error(m, type, code, 0, 0);
1390 	ipstat_inc(ips_badoptions);
1391 	return (1);
1392 }
1393 
1394 /*
1395  * Save incoming source route for use in replies,
1396  * to be picked up later by ip_srcroute if the receiver is interested.
1397  */
1398 void
1399 save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
1400 {
1401 	struct ip_srcrt *isr;
1402 	struct m_tag *mtag;
1403 	unsigned olen;
1404 
1405 	olen = option[IPOPT_OLEN];
1406 	if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes))
1407 		return;
1408 
1409 	mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT);
1410 	if (mtag == NULL) {
1411 		ipstat_inc(ips_idropped);
1412 		return;
1413 	}
1414 	isr = (struct ip_srcrt *)(mtag + 1);
1415 
1416 	memcpy(isr->isr_hdr, option, olen);
1417 	isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
1418 	isr->isr_dst = dst;
1419 	m_tag_prepend(m, mtag);
1420 }
1421 
1422 /*
1423  * Retrieve incoming source route for use in replies,
1424  * in the same form used by setsockopt.
1425  * The first hop is placed before the options, will be removed later.
1426  */
1427 struct mbuf *
1428 ip_srcroute(struct mbuf *m0)
1429 {
1430 	struct in_addr *p, *q;
1431 	struct mbuf *m;
1432 	struct ip_srcrt *isr;
1433 	struct m_tag *mtag;
1434 
1435 	if (!ip_dosourceroute)
1436 		return (NULL);
1437 
1438 	mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL);
1439 	if (mtag == NULL)
1440 		return (NULL);
1441 	isr = (struct ip_srcrt *)(mtag + 1);
1442 
1443 	if (isr->isr_nhops == 0)
1444 		return (NULL);
1445 	m = m_get(M_DONTWAIT, MT_SOOPTS);
1446 	if (m == NULL) {
1447 		ipstat_inc(ips_idropped);
1448 		return (NULL);
1449 	}
1450 
1451 #define OPTSIZ	(sizeof(isr->isr_nop) + sizeof(isr->isr_hdr))
1452 
1453 	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */
1454 	m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ;
1455 
1456 	/*
1457 	 * First save first hop for return route
1458 	 */
1459 	p = &(isr->isr_routes[isr->isr_nhops - 1]);
1460 	*(mtod(m, struct in_addr *)) = *p--;
1461 
1462 	/*
1463 	 * Copy option fields and padding (nop) to mbuf.
1464 	 */
1465 	isr->isr_nop = IPOPT_NOP;
1466 	isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF;
1467 	memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop,
1468 	    OPTSIZ);
1469 	q = (struct in_addr *)(mtod(m, caddr_t) +
1470 	    sizeof(struct in_addr) + OPTSIZ);
1471 #undef OPTSIZ
1472 	/*
1473 	 * Record return path as an IP source route,
1474 	 * reversing the path (pointers are now aligned).
1475 	 */
1476 	while (p >= isr->isr_routes) {
1477 		*q++ = *p--;
1478 	}
1479 	/*
1480 	 * Last hop goes to final destination.
1481 	 */
1482 	*q = isr->isr_dst;
1483 	m_tag_delete(m0, (struct m_tag *)isr);
1484 	return (m);
1485 }
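/*
 * Editor's note (worked example, not part of the original file): if a
 * request from S was source routed through R1 and then R2, save_rte()
 * recorded isr_routes = { R1, R2 } and isr_dst = S.  ip_srcroute() above
 * then emits R2 as the first hop placed before the option, followed by an
 * option listing R1 and finally S, i.e. the recorded path reversed for the
 * reply.
 */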
1486 
1487 /*
1488  * Strip out IP options, at higher level protocol in the kernel.
1489  */
1490 void
1491 ip_stripoptions(struct mbuf *m)
1492 {
1493 	int i;
1494 	struct ip *ip = mtod(m, struct ip *);
1495 	caddr_t opts;
1496 	int olen;
1497 
1498 	olen = (ip->ip_hl<<2) - sizeof (struct ip);
1499 	opts = (caddr_t)(ip + 1);
1500 	i = m->m_len - (sizeof (struct ip) + olen);
1501 	memmove(opts, opts  + olen, i);
1502 	m->m_len -= olen;
1503 	if (m->m_flags & M_PKTHDR)
1504 		m->m_pkthdr.len -= olen;
1505 	ip->ip_hl = sizeof(struct ip) >> 2;
1506 	ip->ip_len = htons(ntohs(ip->ip_len) - olen);
1507 }
1508 
1509 const u_char inetctlerrmap[PRC_NCMDS] = {
1510 	0,		0,		0,		0,
1511 	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
1512 	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
1513 	EMSGSIZE,	EHOSTUNREACH,	0,		0,
1514 	0,		0,		0,		0,
1515 	ENOPROTOOPT
1516 };
1517 
1518 /*
1519  * Forward a packet.  If some error occurs return the sender
1520  * an icmp packet.  Note we can't always generate a meaningful
1521  * icmp message because icmp doesn't have a large enough repertoire
1522  * of codes and types.
1523  *
1524  * If not forwarding, just drop the packet.  This could be confusing
1525  * if ip_forwarding was zero but some routing protocol was advancing
1526  * us as a gateway to somewhere.  However, we must let the routing
1527  * protocol deal with that.
1528  *
1529  * The srcrt parameter indicates whether the packet is being forwarded
1530  * via a source route.
1531  */
1532 void
1533 ip_forward(struct mbuf *m, struct ifnet *ifp, struct route *ro, int flags)
1534 {
1535 	struct mbuf mfake, *mcopy;
1536 	struct ip *ip = mtod(m, struct ip *);
1537 	struct route iproute;
1538 	struct rtentry *rt;
1539 	int error = 0, type = 0, code = 0, destmtu = 0, fake = 0, len;
1540 	u_int32_t dest;
1541 
1542 	dest = 0;
1543 	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
1544 		ipstat_inc(ips_cantforward);
1545 		m_freem(m);
1546 		goto done;
1547 	}
1548 	if (ip->ip_ttl <= IPTTLDEC) {
1549 		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
1550 		goto done;
1551 	}
1552 
1553 	if (ro == NULL) {
1554 		ro = &iproute;
1555 		ro->ro_rt = NULL;
1556 	}
1557 	rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, m->m_pkthdr.ph_rtableid);
1558 	if (rt == NULL) {
1559 		ipstat_inc(ips_noroute);
1560 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
1561 		goto done;
1562 	}
1563 
1564 	/*
1565 	 * Save at most 68 bytes of the packet in case
1566 	 * we need to generate an ICMP message to the src.
1567 	 * The data is saved in the mbuf on the stack that
1568 	 * acts as a temporary storage not intended to be
1569 	 * passed down the IP stack or to the mfree.
1570 	 */
1571 	memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr));
1572 	mfake.m_type = m->m_type;
1573 	if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) {
1574 		mfake.m_data = mfake.m_pktdat;
1575 		len = min(ntohs(ip->ip_len), 68);
1576 		m_copydata(m, 0, len, mfake.m_pktdat);
1577 		mfake.m_pkthdr.len = mfake.m_len = len;
1578 #if NPF > 0
1579 		pf_pkt_addr_changed(&mfake);
1580 #endif	/* NPF > 0 */
1581 		fake = 1;
1582 	}
1583 
1584 	ip->ip_ttl -= IPTTLDEC;
1585 
1586 	/*
1587 	 * If forwarding packet using same interface that it came in on,
1588 	 * perhaps should send a redirect to sender to shortcut a hop.
1589 	 * Only send redirect if source is sending directly to us,
1590 	 * and if packet was not source routed (or has any options).
1591 	 * Also, don't send redirect if forwarding using a default route
1592 	 * or a route modified by a redirect.
1593 	 * Don't send redirect if we advertise destination's arp address
1594 	 * as ours (proxy arp).
1595 	 */
1596 	if ((rt->rt_ifidx == ifp->if_index) &&
1597 	    (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
1598 	    satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
1599 	    ip_sendredirects && !ISSET(flags, IP_REDIRECT) &&
1600 	    !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) {
1601 		if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
1602 		    ifatoia(rt->rt_ifa)->ia_net) {
1603 		    if (rt->rt_flags & RTF_GATEWAY)
1604 			dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
1605 		    else
1606 			dest = ip->ip_dst.s_addr;
1607 		    /* Router requirements says to only send host redirects */
1608 		    type = ICMP_REDIRECT;
1609 		    code = ICMP_REDIRECT_HOST;
1610 		}
1611 	}
1612 
1613 	error = ip_output(m, NULL, ro, flags | IP_FORWARDING, NULL, NULL, 0);
1614 	rt = ro->ro_rt;
1615 	if (error)
1616 		ipstat_inc(ips_cantforward);
1617 	else {
1618 		ipstat_inc(ips_forward);
1619 		if (type)
1620 			ipstat_inc(ips_redirectsent);
1621 		else
1622 			goto done;
1623 	}
1624 	if (!fake)
1625 		goto done;
1626 
1627 	switch (error) {
1628 	case 0:				/* forwarded, but need redirect */
1629 		/* type, code set above */
1630 		break;
1631 
1632 	case EMSGSIZE:
1633 		type = ICMP_UNREACH;
1634 		code = ICMP_UNREACH_NEEDFRAG;
1635 		if (rt != NULL) {
1636 			if (rt->rt_mtu) {
1637 				destmtu = rt->rt_mtu;
1638 			} else {
1639 				struct ifnet *destifp;
1640 
1641 				destifp = if_get(rt->rt_ifidx);
1642 				if (destifp != NULL)
1643 					destmtu = destifp->if_mtu;
1644 				if_put(destifp);
1645 			}
1646 		}
1647 		ipstat_inc(ips_cantfrag);
1648 		if (destmtu == 0)
1649 			goto done;
1650 		break;
1651 
1652 	case EACCES:
1653 		/*
1654 		 * pf(4) blocked the packet. There is no need to send an ICMP
1655 		 * packet back since pf(4) takes care of it.
1656 		 */
1657 		goto done;
1658 
1659 	case ENOBUFS:
1660 		/*
1661 		 * a router should not generate ICMP_SOURCEQUENCH as
1662 		 * required in RFC1812 Requirements for IP Version 4 Routers.
1663 		 * source quench could be a big problem under DoS attacks,
1664 		 * or the underlying interface is rate-limited.
1665 		 */
1666 		goto done;
1667 
1668 	case ENETUNREACH:		/* shouldn't happen, checked above */
1669 	case EHOSTUNREACH:
1670 	case ENETDOWN:
1671 	case EHOSTDOWN:
1672 	default:
1673 		type = ICMP_UNREACH;
1674 		code = ICMP_UNREACH_HOST;
1675 		break;
1676 	}
1677 	mcopy = m_copym(&mfake, 0, len, M_DONTWAIT);
1678 	if (mcopy != NULL)
1679 		icmp_error(mcopy, type, code, dest, destmtu);
1680 
1681  done:
1682 	if (ro == &iproute)
1683 		rtfree(ro->ro_rt);
1684 	if (fake)
1685 		m_tag_delete_chain(&mfake);
1686 }
1687 
1688 int
1689 ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1690     size_t newlen)
1691 {
1692 #ifdef MROUTING
1693 	extern struct mrtstat mrtstat;
1694 #endif
1695 	int oldval, error;
1696 
1697 	/* Almost all sysctl names at this level are terminal. */
1698 	if (namelen != 1 && name[0] != IPCTL_IFQUEUE &&
1699 	    name[0] != IPCTL_ARPQUEUE)
1700 		return (ENOTDIR);
1701 
1702 	switch (name[0]) {
1703 	case IPCTL_SOURCEROUTE:
1704 		NET_LOCK();
1705 		error = sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
1706 		    &ip_dosourceroute);
1707 		NET_UNLOCK();
1708 		return (error);
1709 	case IPCTL_MTUDISC:
1710 		NET_LOCK();
1711 		error = sysctl_int(oldp, oldlenp, newp, newlen, &ip_mtudisc);
1712 		if (ip_mtudisc == 0)
1713 			rt_timer_queue_flush(&ip_mtudisc_timeout_q);
1714 		NET_UNLOCK();
1715 		return error;
1716 	case IPCTL_MTUDISCTIMEOUT:
1717 		NET_LOCK();
1718 		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
1719 		    &ip_mtudisc_timeout, 0, INT_MAX);
1720 		rt_timer_queue_change(&ip_mtudisc_timeout_q,
1721 		    ip_mtudisc_timeout);
1722 		NET_UNLOCK();
1723 		return (error);
1724 #ifdef IPSEC
1725 	case IPCTL_ENCDEBUG:
1726 	case IPCTL_IPSEC_STATS:
1727 	case IPCTL_IPSEC_EXPIRE_ACQUIRE:
1728 	case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT:
1729 	case IPCTL_IPSEC_REQUIRE_PFS:
1730 	case IPCTL_IPSEC_SOFT_ALLOCATIONS:
1731 	case IPCTL_IPSEC_ALLOCATIONS:
1732 	case IPCTL_IPSEC_SOFT_BYTES:
1733 	case IPCTL_IPSEC_BYTES:
1734 	case IPCTL_IPSEC_TIMEOUT:
1735 	case IPCTL_IPSEC_SOFT_TIMEOUT:
1736 	case IPCTL_IPSEC_SOFT_FIRSTUSE:
1737 	case IPCTL_IPSEC_FIRSTUSE:
1738 	case IPCTL_IPSEC_ENC_ALGORITHM:
1739 	case IPCTL_IPSEC_AUTH_ALGORITHM:
1740 	case IPCTL_IPSEC_IPCOMP_ALGORITHM:
1741 		return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp,
1742 		    newlen));
1743 #endif
1744 	case IPCTL_IFQUEUE:
1745 		return (sysctl_niq(name + 1, namelen - 1,
1746 		    oldp, oldlenp, newp, newlen, &ipintrq));
1747 	case IPCTL_ARPQUEUE:
1748 		return (sysctl_niq(name + 1, namelen - 1,
1749 		    oldp, oldlenp, newp, newlen, &arpinq));
1750 	case IPCTL_ARPQUEUED:
1751 		return (sysctl_rdint(oldp, oldlenp, newp,
1752 		    atomic_load_int(&la_hold_total)));
1753 	case IPCTL_STATS:
1754 		return (ip_sysctl_ipstat(oldp, oldlenp, newp));
1755 #ifdef MROUTING
1756 	case IPCTL_MRTSTATS:
1757 		return (sysctl_rdstruct(oldp, oldlenp, newp,
1758 		    &mrtstat, sizeof(mrtstat)));
1759 	case IPCTL_MRTMFC:
1760 		if (newp)
1761 			return (EPERM);
1762 		NET_LOCK();
1763 		error = mrt_sysctl_mfc(oldp, oldlenp);
1764 		NET_UNLOCK();
1765 		return (error);
1766 	case IPCTL_MRTVIF:
1767 		if (newp)
1768 			return (EPERM);
1769 		NET_LOCK();
1770 		error = mrt_sysctl_vif(oldp, oldlenp);
1771 		NET_UNLOCK();
1772 		return (error);
1773 #else
1774 	case IPCTL_MRTPROTO:
1775 	case IPCTL_MRTSTATS:
1776 	case IPCTL_MRTMFC:
1777 	case IPCTL_MRTVIF:
1778 		return (EOPNOTSUPP);
1779 #endif
1780 	case IPCTL_MULTIPATH:
1781 		NET_LOCK();
1782 		oldval = ipmultipath;
1783 		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
1784 		    &ipmultipath, 0, 1);
1785 		if (oldval != ipmultipath)
1786 			atomic_inc_long(&rtgeneration);
1787 		NET_UNLOCK();
1788 		return (error);
1789 	default:
1790 		NET_LOCK();
1791 		error = sysctl_bounded_arr(ipctl_vars, nitems(ipctl_vars),
1792 		    name, namelen, oldp, oldlenp, newp, newlen);
1793 		NET_UNLOCK();
1794 		return (error);
1795 	}
1796 	/* NOTREACHED */
1797 }
1798 
1799 int
1800 ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
1801 {
1802 	uint64_t counters[ips_ncounters];
1803 	struct ipstat ipstat;
1804 	u_long *words = (u_long *)&ipstat;
1805 	int i;
1806 
1807 	CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
1808 	memset(&ipstat, 0, sizeof ipstat);
1809 	counters_read(ipcounters, counters, nitems(counters), NULL);
1810 
1811 	for (i = 0; i < nitems(counters); i++)
1812 		words[i] = (u_long)counters[i];
1813 
1814 	return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
1815 }
1816 
1817 void
1818 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
1819     struct mbuf *m)
1820 {
1821 	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
1822 		struct timeval tv;
1823 
1824 		m_microtime(m, &tv);
1825 		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
1826 		    SCM_TIMESTAMP, SOL_SOCKET);
1827 		if (*mp)
1828 			mp = &(*mp)->m_next;
1829 	}
1830 
1831 	if (inp->inp_flags & INP_RECVDSTADDR) {
1832 		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
1833 		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
1834 		if (*mp)
1835 			mp = &(*mp)->m_next;
1836 	}
1837 #ifdef notyet
1838 	/* this code is broken and will probably never be fixed. */
1839 	/* options were tossed already */
1840 	if (inp->inp_flags & INP_RECVOPTS) {
1841 		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
1842 		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
1843 		if (*mp)
1844 			mp = &(*mp)->m_next;
1845 	}
1846 	/* ip_srcroute doesn't do what we want here, need to fix */
1847 	if (inp->inp_flags & INP_RECVRETOPTS) {
1848 		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
1849 		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
1850 		if (*mp)
1851 			mp = &(*mp)->m_next;
1852 	}
1853 #endif
1854 	if (inp->inp_flags & INP_RECVIF) {
1855 		struct sockaddr_dl sdl;
1856 		struct ifnet *ifp;
1857 
1858 		ifp = if_get(m->m_pkthdr.ph_ifidx);
1859 		if (ifp == NULL || ifp->if_sadl == NULL) {
1860 			memset(&sdl, 0, sizeof(sdl));
1861 			sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
1862 			sdl.sdl_family = AF_LINK;
1863 			sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
1864 			sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
1865 			*mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
1866 			    IP_RECVIF, IPPROTO_IP);
1867 		} else {
1868 			*mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
1869 			    ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
1870 		}
1871 		if (*mp)
1872 			mp = &(*mp)->m_next;
1873 		if_put(ifp);
1874 	}
1875 	if (inp->inp_flags & INP_RECVTTL) {
1876 		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
1877 		    sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
1878 		if (*mp)
1879 			mp = &(*mp)->m_next;
1880 	}
1881 	if (inp->inp_flags & INP_RECVRTABLE) {
1882 		u_int rtableid = inp->inp_rtableid;
1883 
1884 #if NPF > 0
1885 		if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
1886 			struct pf_divert *divert;
1887 
1888 			divert = pf_find_divert(m);
1889 			KASSERT(divert != NULL);
1890 			rtableid = divert->rdomain;
1891 		}
1892 #endif
1893 
1894 		*mp = sbcreatecontrol((caddr_t) &rtableid,
1895 		    sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
1896 		if (*mp)
1897 			mp = &(*mp)->m_next;
1898 	}
1899 }
1900 
1901 void
1902 ip_send_do_dispatch(void *xmq, int flags)
1903 {
1904 	struct mbuf_queue *mq = xmq;
1905 	struct mbuf *m;
1906 	struct mbuf_list ml;
1907 	struct m_tag *mtag;
1908 
1909 	mq_delist(mq, &ml);
1910 	if (ml_empty(&ml))
1911 		return;
1912 
1913 	NET_LOCK_SHARED();
1914 	while ((m = ml_dequeue(&ml)) != NULL) {
1915 		u_int32_t ipsecflowinfo = 0;
1916 
1917 		if ((mtag = m_tag_find(m, PACKET_TAG_IPSEC_FLOWINFO, NULL))
1918 		    != NULL) {
1919 			ipsecflowinfo = *(u_int32_t *)(mtag + 1);
1920 			m_tag_delete(m, mtag);
1921 		}
1922 		ip_output(m, NULL, NULL, flags, NULL, NULL, ipsecflowinfo);
1923 	}
1924 	NET_UNLOCK_SHARED();
1925 }
1926 
1927 void
1928 ip_sendraw_dispatch(void *xmq)
1929 {
1930 	ip_send_do_dispatch(xmq, IP_RAWOUTPUT);
1931 }
1932 
1933 void
1934 ip_send_dispatch(void *xmq)
1935 {
1936 	ip_send_do_dispatch(xmq, 0);
1937 }
1938 
1939 void
1940 ip_send(struct mbuf *m)
1941 {
1942 	mq_enqueue(&ipsend_mq, m);
1943 	task_add(net_tq(0), &ipsend_task);
1944 }
1945 
1946 void
1947 ip_send_raw(struct mbuf *m)
1948 {
1949 	mq_enqueue(&ipsendraw_mq, m);
1950 	task_add(net_tq(0), &ipsendraw_task);
1951 }
1952