xref: /freebsd/sys/netinet/tcp_lro.c (revision 2b833162)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007, Myricom Inc.
5  * Copyright (c) 2008, Intel Corporation.
6  * Copyright (c) 2012 The FreeBSD Foundation
7  * Copyright (c) 2016-2021 Mellanox Technologies.
8  * All rights reserved.
9  *
10  * Portions of this software were developed by Bjoern Zeeb
11  * under sponsorship from the FreeBSD Foundation.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/sockbuf.h>
49 #include <sys/sysctl.h>
50 
51 #include <net/if.h>
52 #include <net/if_var.h>
53 #include <net/ethernet.h>
54 #include <net/bpf.h>
55 #include <net/vnet.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_private.h>
59 #include <net/if_types.h>
60 #include <net/infiniband.h>
61 #include <net/if_lagg.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip6.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet6/in6_pcb.h>
70 #include <netinet/tcp.h>
71 #include <netinet/tcp_seq.h>
72 #include <netinet/tcp_lro.h>
73 #include <netinet/tcp_var.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/tcp_hpts.h>
76 #include <netinet/tcp_log_buf.h>
77 #include <netinet/tcp_fsm.h>
78 #include <netinet/udp.h>
79 #include <netinet6/ip6_var.h>
80 
81 #include <machine/in_cksum.h>
82 
83 static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
84 
85 #define	TCP_LRO_TS_OPTION \
86     ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
87 	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
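
/*
 * TCP_LRO_TS_OPTION is the 32-bit value of the raw option bytes
 * 0x01 0x01 0x08 0x0a as they appear on the wire: two NOP pad
 * bytes followed by the timestamp option kind and length
 * (TCPOPT_NOP == 1, TCPOPT_TIMESTAMP == 8, TCPOLEN_TIMESTAMP == 10).
 * This lets the code below match the four leading option bytes of a
 * timestamp-only TCP header with a single 32-bit load and compare
 * instead of parsing the options byte by byte.
 */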
88 
89 static void	tcp_lro_rx_done(struct lro_ctrl *lc);
90 static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
91 		    uint32_t csum, bool use_hash);
92 
93 #ifdef TCPHPTS
94 static bool	do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
95 		struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **,
96 		bool *, bool, bool, struct ifnet *, bool);
97 
98 #endif
99 
100 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro,  CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
101     "TCP LRO");
102 
103 static long tcplro_stacks_wanting_mbufq;
104 counter_u64_t tcp_inp_lro_direct_queue;
105 counter_u64_t tcp_inp_lro_wokeup_queue;
106 counter_u64_t tcp_inp_lro_compressed;
107 counter_u64_t tcp_inp_lro_locks_taken;
108 counter_u64_t tcp_extra_mbuf;
109 counter_u64_t tcp_would_have_but;
110 counter_u64_t tcp_comp_total;
111 counter_u64_t tcp_uncomp_total;
112 counter_u64_t tcp_bad_csums;
113 
114 static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
115 SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
116     CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
117     "default number of LRO entries");
118 
119 static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
120 SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
121     CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
122     "Number of interrupts in a row on the same CPU required to declare an 'affinity' CPU");
123 
124 static uint32_t tcp_less_accurate_lro_ts = 0;
125 SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_less_accurate,
126     CTLFLAG_MPSAFE, &tcp_less_accurate_lro_ts, 0,
127     "Trade off timestamp accuracy for efficiency by doing fewer timestamp operations");
128 
129 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
130     &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
131 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
132     &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
133 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
134     &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
135 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
136     &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
137 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
138     &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
139 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
140     &tcp_would_have_but, "Number of times we would have had an extra compressed ack, but mget failed");
141 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
142     &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
143 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
144     &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
145 SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lro_badcsum, CTLFLAG_RD,
146     &tcp_bad_csums, "Number of packets that the common code saw with bad csums");
147 
148 void
149 tcp_lro_reg_mbufq(void)
150 {
151 	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
152 }
153 
154 void
155 tcp_lro_dereg_mbufq(void)
156 {
157 	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
158 }
159 
160 static __inline void
161 tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
162     struct lro_entry *le)
163 {
164 
165 	LIST_INSERT_HEAD(&lc->lro_active, le, next);
166 	LIST_INSERT_HEAD(bucket, le, hash_next);
167 }
168 
169 static __inline void
170 tcp_lro_active_remove(struct lro_entry *le)
171 {
172 
173 	LIST_REMOVE(le, next);		/* active list */
174 	LIST_REMOVE(le, hash_next);	/* hash bucket */
175 }
176 
177 int
178 tcp_lro_init(struct lro_ctrl *lc)
179 {
180 	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
181 }
182 
183 int
184 tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
185     unsigned lro_entries, unsigned lro_mbufs)
186 {
187 	struct lro_entry *le;
188 	size_t size;
189 	unsigned i, elements;
190 
191 	lc->lro_bad_csum = 0;
192 	lc->lro_queued = 0;
193 	lc->lro_flushed = 0;
194 	lc->lro_mbuf_count = 0;
195 	lc->lro_mbuf_max = lro_mbufs;
196 	lc->lro_cnt = lro_entries;
197 	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
198 	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
199 	lc->ifp = ifp;
200 	LIST_INIT(&lc->lro_free);
201 	LIST_INIT(&lc->lro_active);
202 
203 	/* create hash table to accelerate entry lookup */
204 	if (lro_entries > lro_mbufs)
205 		elements = lro_entries;
206 	else
207 		elements = lro_mbufs;
208 	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
209 	    HASH_NOWAIT);
210 	if (lc->lro_hash == NULL) {
211 		memset(lc, 0, sizeof(*lc));
212 		return (ENOMEM);
213 	}
214 
215 	/* compute size to allocate */
216 	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
217 	    (lro_entries * sizeof(*le));
218 	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
219 	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);
220 
221 	/* check for out of memory */
222 	if (lc->lro_mbuf_data == NULL) {
223 		free(lc->lro_hash, M_LRO);
224 		memset(lc, 0, sizeof(*lc));
225 		return (ENOMEM);
226 	}
227 	/* compute offset for LRO entries */
228 	le = (struct lro_entry *)
229 	    (lc->lro_mbuf_data + lro_mbufs);
230 
231 	/* setup linked list */
232 	for (i = 0; i != lro_entries; i++)
233 		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);
234 
235 	return (0);
236 }
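
/*
 * Note that the mbuf sort array and the LRO entries above share a
 * single allocation: the entries start immediately after the
 * lro_mbufs sort elements, which is why tcp_lro_free() frees only
 * lc->lro_hash and lc->lro_mbuf_data.
 */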
237 
238 struct vxlan_header {
239 	uint32_t	vxlh_flags;
240 	uint32_t	vxlh_vni;
241 };
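
/*
 * Per RFC 7348 the 24-bit VNI occupies the upper three bytes of the
 * second 32-bit word of the VXLAN header; the htonl(0xffffff00) mask
 * used below keeps exactly those bytes when saving the VNI.
 */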
242 
243 static inline void *
244 tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan, int mlen)
245 {
246 	const struct ether_vlan_header *eh;
247 	void *old;
248 	uint16_t eth_type;
249 
250 	if (update_data)
251 		memset(parser, 0, sizeof(*parser));
252 
253 	old = ptr;
254 
255 	if (is_vxlan) {
256 		const struct vxlan_header *vxh;
257 		vxh = ptr;
258 		ptr = (uint8_t *)ptr + sizeof(*vxh);
259 		if (update_data) {
260 			parser->data.vxlan_vni =
261 			    vxh->vxlh_vni & htonl(0xffffff00);
262 		}
263 	}
264 
265 	eh = ptr;
266 	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
267 		eth_type = eh->evl_proto;
268 		if (update_data) {
269 			/* strip priority and keep VLAN ID only */
270 			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
271 		}
272 		/* advance to next header */
273 		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
274 		mlen -= (ETHER_HDR_LEN  + ETHER_VLAN_ENCAP_LEN);
275 	} else {
276 		eth_type = eh->evl_encap_proto;
277 		/* advance to next header */
278 		mlen -= ETHER_HDR_LEN;
279 		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
280 	}
281 	if (__predict_false(mlen <= 0))
282 		return (NULL);
283 	switch (eth_type) {
284 #ifdef INET
285 	case htons(ETHERTYPE_IP):
286 		parser->ip4 = ptr;
287 		if (__predict_false(mlen < sizeof(struct ip)))
288 			return (NULL);
289 		/* Ensure there are no IPv4 options. */
290 		if ((parser->ip4->ip_hl << 2) != sizeof (*parser->ip4))
291 			break;
292 		/* .. and the packet is not fragmented. */
293 		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
294 			break;
295 		/* .. and the packet has valid src/dst addrs */
296 		if (__predict_false(parser->ip4->ip_src.s_addr == INADDR_ANY ||
297 			parser->ip4->ip_dst.s_addr == INADDR_ANY))
298 			break;
299 		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
300 		mlen -= sizeof(struct ip);
301 		if (update_data) {
302 			parser->data.s_addr.v4 = parser->ip4->ip_src;
303 			parser->data.d_addr.v4 = parser->ip4->ip_dst;
304 		}
305 		switch (parser->ip4->ip_p) {
306 		case IPPROTO_UDP:
307 			if (__predict_false(mlen < sizeof(struct udphdr)))
308 				return (NULL);
309 			parser->udp = ptr;
310 			if (update_data) {
311 				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
312 				parser->data.s_port = parser->udp->uh_sport;
313 				parser->data.d_port = parser->udp->uh_dport;
314 			} else {
315 				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
316 			}
317 			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
318 			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
319 			return (ptr);
320 		case IPPROTO_TCP:
321 			parser->tcp = ptr;
322 			if (__predict_false(mlen < sizeof(struct tcphdr)))
323 				return (NULL);
324 			if (update_data) {
325 				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
326 				parser->data.s_port = parser->tcp->th_sport;
327 				parser->data.d_port = parser->tcp->th_dport;
328 			} else {
329 				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
330 			}
331 			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
332 				return (NULL);
333 			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
334 			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
335 			return (ptr);
336 		default:
337 			break;
338 		}
339 		break;
340 #endif
341 #ifdef INET6
342 	case htons(ETHERTYPE_IPV6):
343 		parser->ip6 = ptr;
344 		if (__predict_false(mlen < sizeof(struct ip6_hdr)))
345 			return (NULL);
346 		/* Ensure the packet has valid src/dst addrs */
347 		if (__predict_false(IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_src) ||
348 			IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_dst)))
349 			return (NULL);
350 		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
351 		if (update_data) {
352 			parser->data.s_addr.v6 = parser->ip6->ip6_src;
353 			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
354 		}
355 		mlen -= sizeof(struct ip6_hdr);
356 		switch (parser->ip6->ip6_nxt) {
357 		case IPPROTO_UDP:
358 			if (__predict_false(mlen < sizeof(struct udphdr)))
359 				return (NULL);
360 			parser->udp = ptr;
361 			if (update_data) {
362 				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
363 				parser->data.s_port = parser->udp->uh_sport;
364 				parser->data.d_port = parser->udp->uh_dport;
365 			} else {
366 				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
367 			}
368 			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
369 			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
370 			return (ptr);
371 		case IPPROTO_TCP:
372 			if (__predict_false(mlen < sizeof(struct tcphdr)))
373 				return (NULL);
374 			parser->tcp = ptr;
375 			if (update_data) {
376 				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
377 				parser->data.s_port = parser->tcp->th_sport;
378 				parser->data.d_port = parser->tcp->th_dport;
379 			} else {
380 				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
381 			}
382 			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
383 				return (NULL);
384 			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
385 			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
386 			return (ptr);
387 		default:
388 			break;
389 		}
390 		break;
391 #endif
392 	default:
393 		break;
394 	}
395 	/* Invalid packet - cannot parse */
396 	return (NULL);
397 }
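
/*
 * On success the low level parser returns a pointer to the first
 * byte of ULP payload (past the TCP header, or past the UDP header
 * when parsing the outer header of a VXLAN packet) and sets
 * total_hdr_len accordingly. NULL is returned for truncated,
 * fragmented or otherwise unsupported packets.
 */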
398 
399 static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
400     CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
401 
402 static inline struct lro_parser *
403 tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
404 {
405 	void *data_ptr;
406 
407 	/* Try to parse outer headers first. */
408 	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false, m->m_len);
409 	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
410 		return (NULL);
411 
412 	if (update_data) {
413 		/* Store VLAN ID, if any. */
414 		if (__predict_false(m->m_flags & M_VLANTAG)) {
415 			po->data.vlan_id =
416 			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
417 		}
418 		/* Store decrypted flag, if any. */
419 		if (__predict_false((m->m_pkthdr.csum_flags &
420 		    CSUM_TLS_MASK) == CSUM_TLS_DECRYPTED))
421 			po->data.lro_flags |= LRO_FLAG_DECRYPTED;
422 	}
423 
424 	switch (po->data.lro_type) {
425 	case LRO_TYPE_IPV4_UDP:
426 	case LRO_TYPE_IPV6_UDP:
427 		/* Check for VXLAN headers. */
428 		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
429 			break;
430 
431 		/* Try to parse inner headers. */
432 		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true,
433 						    (m->m_len - ((caddr_t)data_ptr - m->m_data)));
434 		if (data_ptr == NULL || (pi->total_hdr_len + po->total_hdr_len) > m->m_len)
435 			break;
436 
437 		/* Verify supported header types. */
438 		switch (pi->data.lro_type) {
439 		case LRO_TYPE_IPV4_TCP:
440 		case LRO_TYPE_IPV6_TCP:
441 			return (pi);
442 		default:
443 			break;
444 		}
445 		break;
446 	case LRO_TYPE_IPV4_TCP:
447 	case LRO_TYPE_IPV6_TCP:
448 		if (update_data)
449 			memset(pi, 0, sizeof(*pi));
450 		return (po);
451 	default:
452 		break;
453 	}
454 	return (NULL);
455 }
456 
457 static inline int
458 tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
459 {
460 	int len;
461 
462 	switch (po->data.lro_type) {
463 #ifdef INET
464 	case LRO_TYPE_IPV4_TCP:
465 		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
466 		    ntohs(po->ip4->ip_len);
467 		break;
468 #endif
469 #ifdef INET6
470 	case LRO_TYPE_IPV6_TCP:
471 		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
472 		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
473 		break;
474 #endif
475 	default:
476 		return (TCP_LRO_CANNOT);
477 	}
478 
479 	/*
480 	 * If the frame is padded beyond the end of the IP packet,
481 	 * then trim the extra bytes off:
482 	 */
483 	if (__predict_true(m->m_pkthdr.len == len)) {
484 		return (0);
485 	} else if (m->m_pkthdr.len > len) {
486 		m_adj(m, len - m->m_pkthdr.len);
487 		return (0);
488 	}
489 	return (TCP_LRO_CANNOT);
490 }
491 
492 static struct tcphdr *
493 tcp_lro_get_th(struct mbuf *m)
494 {
495 	return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
496 }
497 
498 static void
499 lro_free_mbuf_chain(struct mbuf *m)
500 {
501 	struct mbuf *save;
502 
503 	while (m) {
504 		save = m->m_nextpkt;
505 		m->m_nextpkt = NULL;
506 		m_freem(m);
507 		m = save;
508 	}
509 }
510 
511 void
512 tcp_lro_free(struct lro_ctrl *lc)
513 {
514 	struct lro_entry *le;
515 	unsigned x;
516 
517 	/* reset LRO free list */
518 	LIST_INIT(&lc->lro_free);
519 
520 	/* free active mbufs, if any */
521 	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
522 		tcp_lro_active_remove(le);
523 		lro_free_mbuf_chain(le->m_head);
524 	}
525 
526 	/* free hash table */
527 	free(lc->lro_hash, M_LRO);
528 	lc->lro_hash = NULL;
529 	lc->lro_hashsz = 0;
530 
531 	/* free mbuf array, if any */
532 	for (x = 0; x != lc->lro_mbuf_count; x++)
533 		m_freem(lc->lro_mbuf_data[x].mb);
534 	lc->lro_mbuf_count = 0;
535 
536 	/* free allocated memory, if any */
537 	free(lc->lro_mbuf_data, M_LRO);
538 	lc->lro_mbuf_data = NULL;
539 }
540 
541 static uint16_t
542 tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
543 {
544 	const uint16_t *ptr;
545 	uint32_t csum;
546 	uint16_t len;
547 
548 	csum = -th->th_sum;	/* exclude checksum field */
549 	len = th->th_off;
550 	ptr = (const uint16_t *)th;
551 	while (len--) {
552 		csum += *ptr;
553 		ptr++;
554 		csum += *ptr;
555 		ptr++;
556 	}
557 	while (csum > 0xffff)
558 		csum = (csum >> 16) + (csum & 0xffff);
559 
560 	return (csum);
561 }
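
/*
 * A worked example of the folding loop above: a 32-bit partial sum
 * of 0x2f0d6 folds to 0x2 + 0xf0d6 = 0xf0d8, which is the 16-bit
 * ones' complement sum of the header words.
 */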
562 
563 static uint16_t
564 tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
565 {
566 	uint32_t c;
567 	uint16_t cs;
568 
569 	c = tcp_csum;
570 
571 	switch (pa->data.lro_type) {
572 #ifdef INET6
573 	case LRO_TYPE_IPV6_TCP:
574 		/* Compute full pseudo IPv6 header checksum. */
575 		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
576 		break;
577 #endif
578 #ifdef INET
579 	case LRO_TYPE_IPV4_TCP:
580 		/* Compute full pseudo IPv4 header checksum. */
581 		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
582 		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
583 		break;
584 #endif
585 	default:
586 		cs = 0;		/* Keep compiler happy. */
587 		break;
588 	}
589 
590 	/* Complement checksum. */
591 	cs = ~cs;
592 	c += cs;
593 
594 	/* Remove TCP header checksum. */
595 	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
596 	c += cs;
597 
598 	/* Compute checksum remainder. */
599 	while (c > 0xffff)
600 		c = (c >> 16) + (c & 0xffff);
601 
602 	return (c);
603 }
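
/*
 * The value returned above is "tcp_csum" with the pseudo-header and
 * TCP header contributions removed (by adding their ones'
 * complements), i.e. a checksum covering only the TCP payload
 * bytes, matching the payload-only checksum the code tracks in
 * m_pkthdr.lro_tcp_d_csum.
 */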
604 
605 static void
606 tcp_lro_rx_done(struct lro_ctrl *lc)
607 {
608 	struct lro_entry *le;
609 
610 	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
611 		tcp_lro_active_remove(le);
612 		tcp_lro_flush(lc, le);
613 	}
614 }
615 
616 static void
617 tcp_lro_flush_active(struct lro_ctrl *lc)
618 {
619 	struct lro_entry *le, *le_tmp;
620 
621 	/*
622 	 * Walk through the list of le entries and flush any
623 	 * one that has packets. This is called because we have
624 	 * an inbound packet (e.g. SYN) that has to have all
625 	 * others flushed in front of it. Note we have to do the
626 	 * remove because tcp_lro_flush() assumes that the entry
627 	 * is being freed; this is ok, it will just get
628 	 * reallocated again as if it were new. Since the flush
629 	 * reinitializes the entry, use the _SAFE iterator.
630 	 */
631 	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
632 		if (le->m_head != NULL) {
633 			tcp_lro_active_remove(le);
634 			tcp_lro_flush(lc, le);
635 		}
636 	}
637 }
638 
639 void
640 tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
641 {
642 	struct lro_entry *le, *le_tmp;
643 	uint64_t now, tov;
644 	struct bintime bt;
645 
646 	NET_EPOCH_ASSERT();
647 	if (LIST_EMPTY(&lc->lro_active))
648 		return;
649 
650 	/* get timeout time and current time in ns */
651 	binuptime(&bt);
652 	now = bintime2ns(&bt);
653 	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
654 	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
655 		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
656 			tcp_lro_active_remove(le);
657 			tcp_lro_flush(lc, le);
658 		}
659 	}
660 }
661 
662 #ifdef INET
663 static int
664 tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
665 {
666 	uint16_t csum;
667 
668 	/* Legacy IP has a header checksum that needs to be correct. */
669 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
670 		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
671 			lc->lro_bad_csum++;
672 			return (TCP_LRO_CANNOT);
673 		}
674 	} else {
675 		csum = in_cksum_hdr(ip4);
676 		if (__predict_false(csum != 0)) {
677 			lc->lro_bad_csum++;
678 			return (TCP_LRO_CANNOT);
679 		}
680 	}
681 	return (0);
682 }
683 #endif
684 
685 #ifdef TCPHPTS
686 static void
687 tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
688     const struct lro_entry *le, const struct mbuf *m,
689     int frm, int32_t tcp_data_len, uint32_t th_seq,
690     uint32_t th_ack, uint16_t th_win)
691 {
692 	if (tcp_bblogging_on(tp)) {
693 		union tcp_log_stackspecific log;
694 		struct timeval tv, btv;
695 		uint32_t cts;
696 
697 		cts = tcp_get_usecs(&tv);
698 		memset(&log, 0, sizeof(union tcp_log_stackspecific));
699 		log.u_bbr.flex8 = frm;
700 		log.u_bbr.flex1 = tcp_data_len;
701 		if (m)
702 			log.u_bbr.flex2 = m->m_pkthdr.len;
703 		else
704 			log.u_bbr.flex2 = 0;
705 		if (le->m_head) {
706 			log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
707 			log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
708 			log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
709 			log.u_bbr.delRate = le->m_head->m_flags;
710 			log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
711 		}
712 		log.u_bbr.inflight = th_seq;
713 		log.u_bbr.delivered = th_ack;
714 		log.u_bbr.timeStamp = cts;
715 		log.u_bbr.epoch = le->next_seq;
716 		log.u_bbr.lt_epoch = le->ack_seq;
717 		log.u_bbr.pacing_gain = th_win;
718 		log.u_bbr.cwnd_gain = le->window;
719 		log.u_bbr.lost = curcpu;
720 		log.u_bbr.cur_del_rate = (uintptr_t)m;
721 		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
722 		bintime2timeval(&lc->lro_last_queue_time, &btv);
723 		log.u_bbr.flex6 = tcp_tv_to_usectick(&btv);
724 		log.u_bbr.flex7 = le->compressed;
725 		log.u_bbr.pacing_gain = le->uncompressed;
726 		if (in_epoch(net_epoch_preempt))
727 			log.u_bbr.inhpts = 1;
728 		else
729 			log.u_bbr.inhpts = 0;
730 		TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
731 		    &tptosocket(tp)->so_snd,
732 		    TCP_LOG_LRO, 0, 0, &log, false, &tv);
733 	}
734 }
735 #endif
736 
737 static inline void
738 tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
739 {
740 	uint32_t csum;
741 
742 	csum = 0xffff - *ptr + value;
743 	while (csum > 0xffff)
744 		csum = (csum >> 16) + (csum & 0xffff);
745 	*ptr = value;
746 	*psum = csum;
747 }
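
/*
 * The helper above is an RFC 1624 style incremental update: it
 * stores "value" and reports, via *psum, the folded delta
 * 0xffff - old + new, which later stages add into any checksum that
 * covered the old 16-bit word. For example, replacing 0x1234 with
 * 0x1240 gives 0xffff - 0x1234 + 0x1240 = 0x1000b, which folds to
 * 0x000c.
 */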
748 
749 static uint16_t
750 tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
751     uint16_t payload_len, uint16_t delta_sum)
752 {
753 	uint32_t csum;
754 	uint16_t tlen;
755 	uint16_t temp[5] = {};
756 
757 	switch (pa->data.lro_type) {
758 	case LRO_TYPE_IPV4_TCP:
759 		/* Compute new IPv4 length. */
760 		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
761 		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);
762 
763 		/* Subtract delta from current IPv4 checksum. */
764 		csum = pa->ip4->ip_sum + 0xffff - temp[0];
765 		while (csum > 0xffff)
766 			csum = (csum >> 16) + (csum & 0xffff);
767 		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
768 		goto update_tcp_header;
769 
770 	case LRO_TYPE_IPV6_TCP:
771 		/* Compute new IPv6 length. */
772 		tlen = (pa->tcp->th_off << 2) + payload_len;
773 		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
774 		goto update_tcp_header;
775 
776 	case LRO_TYPE_IPV4_UDP:
777 		/* Compute new IPv4 length. */
778 		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
779 		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);
780 
781 		/* Subtract delta from current IPv4 checksum. */
782 		csum = pa->ip4->ip_sum + 0xffff - temp[0];
783 		while (csum > 0xffff)
784 			csum = (csum >> 16) + (csum & 0xffff);
785 		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
786 		goto update_udp_header;
787 
788 	case LRO_TYPE_IPV6_UDP:
789 		/* Compute new IPv6 length. */
790 		tlen = sizeof(*pa->udp) + payload_len;
791 		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
792 		goto update_udp_header;
793 
794 	default:
795 		return (0);
796 	}
797 
798 update_tcp_header:
799 	/* Compute current TCP header checksum. */
800 	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);
801 
802 	/* Incorporate the latest ACK into the TCP header. */
803 	pa->tcp->th_ack = le->ack_seq;
804 	pa->tcp->th_win = le->window;
805 
806 	/* Incorporate latest timestamp into the TCP header. */
807 	if (le->timestamp != 0) {
808 		uint32_t *ts_ptr;
809 
810 		ts_ptr = (uint32_t *)(pa->tcp + 1);
811 		ts_ptr[1] = htonl(le->tsval);
812 		ts_ptr[2] = le->tsecr;
813 	}
814 
815 	/* Compute new TCP header checksum. */
816 	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);
817 
818 	/* Compute new TCP checksum. */
819 	csum = pa->tcp->th_sum + 0xffff - delta_sum +
820 	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
821 	while (csum > 0xffff)
822 		csum = (csum >> 16) + (csum & 0xffff);
823 
824 	/* Assign new TCP checksum. */
825 	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);
826 
827 	/* Compute all modifications affecting next checksum. */
828 	csum = temp[0] + temp[1] + 0xffff - temp[2] +
829 	    temp[3] + temp[4] + delta_sum;
830 	while (csum > 0xffff)
831 		csum = (csum >> 16) + (csum & 0xffff);
832 
833 	/* Return delta checksum to next stage, if any. */
834 	return (csum);
835 
836 update_udp_header:
837 	tlen = sizeof(*pa->udp) + payload_len;
838 	/* Assign new UDP length and compute checksum delta. */
839 	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);
840 
841 	/* Check if there is a UDP checksum. */
842 	if (__predict_false(pa->udp->uh_sum != 0)) {
843 		/* Compute new UDP checksum. */
844 		csum = pa->udp->uh_sum + 0xffff - delta_sum +
845 		    0xffff - temp[0] + 0xffff - temp[2];
846 		while (csum > 0xffff)
847 			csum = (csum >> 16) + (csum & 0xffff);
848 		/* Assign new UDP checksum. */
849 		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
850 	}
851 
852 	/* Compute all modifications affecting next checksum. */
853 	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
854 	while (csum > 0xffff)
855 		csum = (csum >> 16) + (csum & 0xffff);
856 
857 	/* Return delta checksum to next stage, if any. */
858 	return (csum);
859 }
860 
861 static void
862 tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
863 {
864 	/* Check if we need to recompute any checksums. */
865 	if (le->needs_merge) {
866 		uint16_t csum;
867 
868 		switch (le->inner.data.lro_type) {
869 		case LRO_TYPE_IPV4_TCP:
870 			csum = tcp_lro_update_checksum(&le->inner, le,
871 			    le->m_head->m_pkthdr.lro_tcp_d_len,
872 			    le->m_head->m_pkthdr.lro_tcp_d_csum);
873 			csum = tcp_lro_update_checksum(&le->outer, NULL,
874 			    le->m_head->m_pkthdr.lro_tcp_d_len +
875 			    le->inner.total_hdr_len, csum);
876 			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
877 			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
878 			le->m_head->m_pkthdr.csum_data = 0xffff;
879 			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
880 				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
881 			break;
882 		case LRO_TYPE_IPV6_TCP:
883 			csum = tcp_lro_update_checksum(&le->inner, le,
884 			    le->m_head->m_pkthdr.lro_tcp_d_len,
885 			    le->m_head->m_pkthdr.lro_tcp_d_csum);
886 			csum = tcp_lro_update_checksum(&le->outer, NULL,
887 			    le->m_head->m_pkthdr.lro_tcp_d_len +
888 			    le->inner.total_hdr_len, csum);
889 			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
890 			    CSUM_PSEUDO_HDR;
891 			le->m_head->m_pkthdr.csum_data = 0xffff;
892 			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
893 				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
894 			break;
895 		case LRO_TYPE_NONE:
896 			switch (le->outer.data.lro_type) {
897 			case LRO_TYPE_IPV4_TCP:
898 				csum = tcp_lro_update_checksum(&le->outer, le,
899 				    le->m_head->m_pkthdr.lro_tcp_d_len,
900 				    le->m_head->m_pkthdr.lro_tcp_d_csum);
901 				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
902 				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
903 				le->m_head->m_pkthdr.csum_data = 0xffff;
904 				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
905 					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
906 				break;
907 			case LRO_TYPE_IPV6_TCP:
908 				csum = tcp_lro_update_checksum(&le->outer, le,
909 				    le->m_head->m_pkthdr.lro_tcp_d_len,
910 				    le->m_head->m_pkthdr.lro_tcp_d_csum);
911 				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
912 				    CSUM_PSEUDO_HDR;
913 				le->m_head->m_pkthdr.csum_data = 0xffff;
914 				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
915 					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
916 				break;
917 			default:
918 				break;
919 			}
920 			break;
921 		default:
922 			break;
923 		}
924 	}
925 
926 	/*
927 	 * Break any chain: in the singleton case m_head's m_nextpkt
928 	 * may not have been cleared yet. In the other cases
929 	 * m_nextpkt was already set to NULL in tcp_push_and_replace().
930 	 */
931 	le->m_head->m_nextpkt = NULL;
932 	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
933 	(*lc->ifp->if_input)(lc->ifp, le->m_head);
934 }
935 
936 static void
937 tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
938     struct mbuf *m, struct tcphdr *th)
939 {
940 	uint32_t *ts_ptr;
941 	uint16_t tcp_data_len;
942 	uint16_t tcp_opt_len;
943 
944 	ts_ptr = (uint32_t *)(th + 1);
945 	tcp_opt_len = (th->th_off << 2);
946 	tcp_opt_len -= sizeof(*th);
947 
948 	/* Check if there is a timestamp option. */
949 	if (tcp_opt_len == 0 ||
950 	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
951 	    *ts_ptr != TCP_LRO_TS_OPTION)) {
952 		/* We failed to find the timestamp option. */
953 		le->timestamp = 0;
954 	} else {
955 		le->timestamp = 1;
956 		le->tsval = ntohl(*(ts_ptr + 1));
957 		le->tsecr = *(ts_ptr + 2);
958 	}
959 
960 	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
961 
962 	/* Pull out TCP sequence numbers and window size. */
963 	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
964 	le->ack_seq = th->th_ack;
965 	le->window = th->th_win;
966 	le->flags = tcp_get_flags(th);
967 	le->needs_merge = 0;
968 
969 	/* Setup new data pointers. */
970 	le->m_head = m;
971 	le->m_tail = m_last(m);
972 }
973 
974 static void
975 tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
976 {
977 	struct lro_parser *pa;
978 
979 	/*
980 	 * Push up the stack of the current entry
981 	 * and replace it with "m".
982 	 */
983 	struct mbuf *msave;
984 
985 	/* Grab off the next and save it */
986 	msave = le->m_head->m_nextpkt;
987 	le->m_head->m_nextpkt = NULL;
988 
989 	/* Now push out the old entry */
990 	tcp_flush_out_entry(lc, le);
991 
992 	/* Re-parse new header, should not fail. */
993 	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
994 	KASSERT(pa != NULL,
995 	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));
996 
997 	/*
998 	 * Now to replace the data properly in the entry
999 	 * we have to reset the TCP header and
1000 	 * other fields.
1001 	 */
1002 	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);
1003 
1004 	/* Restore the next list */
1005 	m->m_nextpkt = msave;
1006 }
1007 
1008 static void
1009 tcp_lro_mbuf_append_pkthdr(struct lro_entry *le, const struct mbuf *p)
1010 {
1011 	struct mbuf *m;
1012 	uint32_t csum;
1013 
1014 	m = le->m_head;
1015 	if (m->m_pkthdr.lro_nsegs == 1) {
1016 		/* Compute relative checksum. */
1017 		csum = p->m_pkthdr.lro_tcp_d_csum;
1018 	} else {
1019 		/* Merge TCP data checksums. */
1020 		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
1021 		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
1022 		while (csum > 0xffff)
1023 			csum = (csum >> 16) + (csum & 0xffff);
1024 	}
1025 
1026 	/* Update various counters. */
1027 	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
1028 	m->m_pkthdr.lro_tcp_d_csum = csum;
1029 	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
1030 	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
1031 	le->needs_merge = 1;
1032 }
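
/*
 * Merging the two folded payload checksums by addition and folding
 * again is valid because ones' complement addition is commutative
 * and associative, so the merged lro_tcp_d_csum covers the
 * concatenated payloads of both segments.
 */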
1033 
1034 static void
1035 tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
1036 {
1037 	/*
1038 	 * Walk through the mbuf chain we
1039 	 * have on tap and compress/condense
1040 	 * as required.
1041 	 */
1042 	uint32_t *ts_ptr;
1043 	struct mbuf *m;
1044 	struct tcphdr *th;
1045 	uint32_t tcp_data_len_total;
1046 	uint32_t tcp_data_seg_total;
1047 	uint16_t tcp_data_len;
1048 	uint16_t tcp_opt_len;
1049 
1050 	/*
1051 	 * First we must check the lead (m_head) and
1052 	 * make sure that it is *not* something
1053 	 * that should be sent up right away
1054 	 * (SACK, etc.).
1055 	 */
1056 again:
1057 	m = le->m_head->m_nextpkt;
1058 	if (m == NULL) {
1059 		/* Just one left. */
1060 		return;
1061 	}
1062 
1063 	th = tcp_lro_get_th(m);
1064 	tcp_opt_len = (th->th_off << 2);
1065 	tcp_opt_len -= sizeof(*th);
1066 	ts_ptr = (uint32_t *)(th + 1);
1067 
1068 	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
1069 	    *ts_ptr != TCP_LRO_TS_OPTION)) {
1070 		/*
1071 		 * It's not the timestamp option. We can't
1072 		 * use this segment as the head.
1073 		 */
1074 		le->m_head->m_nextpkt = m->m_nextpkt;
1075 		tcp_push_and_replace(lc, le, m);
1076 		goto again;
1077 	}
1078 	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
1079 		/*
1080 		 * Make sure that previously seen segments/ACKs are delivered
1081 		 * before this segment, e.g. FIN.
1082 		 */
1083 		le->m_head->m_nextpkt = m->m_nextpkt;
1084 		tcp_push_and_replace(lc, le, m);
1085 		goto again;
1086 	}
1087 	while ((m = le->m_head->m_nextpkt) != NULL) {
1088 		/*
1089 		 * condense m into le, first
1090 		 * pull m out of the list.
1091 		 */
1092 		le->m_head->m_nextpkt = m->m_nextpkt;
1093 		m->m_nextpkt = NULL;
1094 		/* Setup my data */
1095 		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
1096 		th = tcp_lro_get_th(m);
1097 		ts_ptr = (uint32_t *)(th + 1);
1098 		tcp_opt_len = (th->th_off << 2);
1099 		tcp_opt_len -= sizeof(*th);
1100 		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
1101 		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;
1102 
1103 		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
1104 		    tcp_data_len_total >= lc->lro_length_lim) {
1105 			/* Flush now if appending will result in overflow. */
1106 			tcp_push_and_replace(lc, le, m);
1107 			goto again;
1108 		}
1109 		if (tcp_opt_len != 0 &&
1110 		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
1111 		    *ts_ptr != TCP_LRO_TS_OPTION)) {
1112 			/*
1113 			 * Maybe a SACK in the new one? We need to
1114 			 * start all over after flushing the
1115 			 * current le. We will go up to the beginning
1116 			 * and flush it (possibly calling the replace
1117 			 * again, or just returning).
1118 			 */
1119 			tcp_push_and_replace(lc, le, m);
1120 			goto again;
1121 		}
1122 		if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
1123 			tcp_push_and_replace(lc, le, m);
1124 			goto again;
1125 		}
1126 		if (tcp_opt_len != 0) {
1127 			uint32_t tsval = ntohl(*(ts_ptr + 1));
1128 			/* Make sure timestamp values are increasing. */
1129 			if (TSTMP_GT(le->tsval, tsval))  {
1130 				tcp_push_and_replace(lc, le, m);
1131 				goto again;
1132 			}
1133 			le->tsval = tsval;
1134 			le->tsecr = *(ts_ptr + 2);
1135 		}
1136 		/* Try to append the new segment. */
1137 		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
1138 				    ((tcp_get_flags(th) & TH_ACK) !=
1139 				      (le->flags & TH_ACK)) ||
1140 				    (tcp_data_len == 0 &&
1141 				     le->ack_seq == th->th_ack &&
1142 				     le->window == th->th_win))) {
1143 			/* Out of order packet, non-ACK + ACK or dup ACK. */
1144 			tcp_push_and_replace(lc, le, m);
1145 			goto again;
1146 		}
1147 		if (tcp_data_len != 0 ||
1148 		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
1149 			le->next_seq += tcp_data_len;
1150 			le->ack_seq = th->th_ack;
1151 			le->window = th->th_win;
1152 			le->needs_merge = 1;
1153 		} else if (th->th_ack == le->ack_seq) {
1154 			if (WIN_GT(th->th_win, le->window)) {
1155 				le->window = th->th_win;
1156 				le->needs_merge = 1;
1157 			}
1158 		}
1159 
1160 		if (tcp_data_len == 0) {
1161 			m_freem(m);
1162 			continue;
1163 		}
1164 
1165 		/* Merge TCP data checksum and length to head mbuf. */
1166 		tcp_lro_mbuf_append_pkthdr(le, m);
1167 
1168 		/*
1169 		 * Adjust the mbuf so that m_data points to the first byte of
1170 		 * the ULP payload.  Adjust the mbuf to avoid complications and
1171 		 * append new segment to existing mbuf chain.
1172 		 */
1173 		m_adj(m, m->m_pkthdr.len - tcp_data_len);
1174 		m_demote_pkthdr(m);
1175 		le->m_tail->m_next = m;
1176 		le->m_tail = m_last(m);
1177 	}
1178 }
1179 
1180 #ifdef TCPHPTS
1181 static void
1182 tcp_queue_pkts(struct inpcb *inp, struct tcpcb *tp, struct lro_entry *le)
1183 {
1184 	INP_WLOCK_ASSERT(inp);
1185 	if (tp->t_in_pkt == NULL) {
1186 		/* Nothing yet there */
1187 		tp->t_in_pkt = le->m_head;
1188 		tp->t_tail_pkt = le->m_last_mbuf;
1189 	} else {
1190 		/* Already some there */
1191 		tp->t_tail_pkt->m_nextpkt = le->m_head;
1192 		tp->t_tail_pkt = le->m_last_mbuf;
1193 	}
1194 	le->m_head = NULL;
1195 	le->m_last_mbuf = NULL;
1196 }
1197 
1198 static bool
1199 tcp_lro_check_wake_status(struct inpcb *inp)
1200 {
1201 	struct tcpcb *tp;
1202 
1203 	tp = intotcpcb(inp);
1204 	if (__predict_false(tp == NULL))
1205 		return (true);
1206 	if (tp->t_fb->tfb_early_wake_check != NULL)
1207 		return ((tp->t_fb->tfb_early_wake_check)(tp));
1208 	return (false);
1209 }
1210 
1211 static struct mbuf *
1212 tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
1213     struct inpcb *inp, int32_t *new_m, bool can_append_old_cmp)
1214 {
1215 	struct tcpcb *tp;
1216 	struct mbuf *m;
1217 
1218 	tp = intotcpcb(inp);
1219 	if (__predict_false(tp == NULL))
1220 		return (NULL);
1221 
1222 	/* Look at the last mbuf if any in queue */
1223 	if (can_append_old_cmp) {
1224 		m = tp->t_tail_pkt;
1225 		if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
1226 			if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
1227 				tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
1228 				*new_m = 0;
1229 				counter_u64_add(tcp_extra_mbuf, 1);
1230 				return (m);
1231 			} else {
1232 				/* Mark we ran out of space */
1233 				inp->inp_flags2 |= INP_MBUF_L_ACKS;
1234 			}
1235 		}
1236 	}
1237 	/* Decide mbuf size. */
1238 	tcp_lro_log(tp, lc, le, NULL, 21, 0, 0, 0, 0);
1239 	if (inp->inp_flags2 & INP_MBUF_L_ACKS)
1240 		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
1241 	else
1242 		m = m_gethdr(M_NOWAIT, MT_DATA);
1243 
1244 	if (__predict_false(m == NULL)) {
1245 		counter_u64_add(tcp_would_have_but, 1);
1246 		return (NULL);
1247 	}
1248 	counter_u64_add(tcp_comp_total, 1);
1249 	m->m_pkthdr.rcvif = lc->ifp;
1250 	m->m_flags |= M_ACKCMP;
1251 	*new_m = 1;
1252 	return (m);
1253 }
1254 
1255 static struct inpcb *
1256 tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
1257 {
1258 	struct inpcb *inp;
1259 
1260 	switch (pa->data.lro_type) {
1261 #ifdef INET6
1262 	case LRO_TYPE_IPV6_TCP:
1263 		inp = in6_pcblookup(&V_tcbinfo,
1264 		    &pa->data.s_addr.v6,
1265 		    pa->data.s_port,
1266 		    &pa->data.d_addr.v6,
1267 		    pa->data.d_port,
1268 		    INPLOOKUP_WLOCKPCB,
1269 		    ifp);
1270 		break;
1271 #endif
1272 #ifdef INET
1273 	case LRO_TYPE_IPV4_TCP:
1274 		inp = in_pcblookup(&V_tcbinfo,
1275 		    pa->data.s_addr.v4,
1276 		    pa->data.s_port,
1277 		    pa->data.d_addr.v4,
1278 		    pa->data.d_port,
1279 		    INPLOOKUP_WLOCKPCB,
1280 		    ifp);
1281 		break;
1282 #endif
1283 	default:
1284 		inp = NULL;
1285 		break;
1286 	}
1287 	return (inp);
1288 }
1289 
1290 static inline bool
1291 tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
1292 {
1293 	/*
1294 	 * This function returns two bits of valuable information.
1295 	 * a) Is what is present capable of being ack-compressed?
1296 	 *    We can ack-compress if there are no options or just
1297 	 *    a timestamp option, and of course the th_flags must
1298 	 *    be correct as well.
1299 	 * b) Are other options present, such as SACK? This is
1300 	 *    used to determine if we want to wake up or not.
1301 	 */
1302 	bool ret = true;
1303 
1304 	switch (th->th_off << 2) {
1305 	case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
1306 		*ppts = (uint32_t *)(th + 1);
1307 		/* Check if we have only one timestamp option. */
1308 		if (**ppts == TCP_LRO_TS_OPTION)
1309 			*other_opts = false;
1310 		else {
1311 			*other_opts = true;
1312 			ret = false;
1313 		}
1314 		break;
1315 	case (sizeof(*th)):
1316 		/* No options. */
1317 		*ppts = NULL;
1318 		*other_opts = false;
1319 		break;
1320 	default:
1321 		*ppts = NULL;
1322 		*other_opts = true;
1323 		ret = false;
1324 		break;
1325 	}
1326 	/* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
1327 	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
1328 		ret = false;
1329 	/* If it has data on it we cannot compress it */
1330 	if (m->m_pkthdr.lro_tcp_d_len)
1331 		ret = false;
1332 
1333 	/* ACK flag must be set. */
1334 	if (!(tcp_get_flags(th) & TH_ACK))
1335 		ret = false;
1336 	return (ret);
1337 }
1338 
1339 static int
1340 tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
1341 {
1342 	struct inpcb *inp;
1343 	struct tcpcb *tp;
1344 	struct mbuf **pp, *cmp, *mv_to;
1345 	struct ifnet *lagg_ifp;
1346 	bool bpf_req, lagg_bpf_req, should_wake, can_append_old_cmp;
1347 
1348 	/* Check if the packet doesn't belong to our network interface. */
1349 	if ((tcplro_stacks_wanting_mbufq == 0) ||
1350 	    (le->outer.data.vlan_id != 0) ||
1351 	    (le->inner.data.lro_type != LRO_TYPE_NONE))
1352 		return (TCP_LRO_CANNOT);
1353 
1354 #ifdef INET6
1355 	/*
1356 	 * Be proactive about an unspecified IPv6 source address. As
1357 	 * we use all-zeros to indicate an unbound/unconnected pcb,
1358 	 * an unspecified IPv6 address can be used to confuse us.
1359 	 *
1360 	 * Note that packets with an unspecified IPv6 destination are
1361 	 * already dropped in ip6_input.
1362 	 */
1363 	if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
1364 	    IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
1365 		return (TCP_LRO_CANNOT);
1366 
1367 	if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
1368 	    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
1369 		return (TCP_LRO_CANNOT);
1370 #endif
1371 	/* Lookup inp, if any. */
1372 	inp = tcp_lro_lookup(lc->ifp,
1373 	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
1374 	if (inp == NULL)
1375 		return (TCP_LRO_CANNOT);
1376 
1377 	counter_u64_add(tcp_inp_lro_locks_taken, 1);
1378 
1379 	/* Get TCP control structure. */
1380 	tp = intotcpcb(inp);
1381 
1382 	/* Check if the inp is dead, Jim. */
1383 	if (tp->t_state == TCPS_TIME_WAIT) {
1384 		INP_WUNLOCK(inp);
1385 		return (TCP_LRO_CANNOT);
1386 	}
1387 	if ((inp->inp_irq_cpu_set == 0)  && (lc->lro_cpu_is_set == 1)) {
1388 		inp->inp_irq_cpu = lc->lro_last_cpu;
1389 		inp->inp_irq_cpu_set = 1;
1390 	}
1391 	/* Check if the transport doesn't support the needed optimizations. */
1392 	if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
1393 		INP_WUNLOCK(inp);
1394 		return (TCP_LRO_CANNOT);
1395 	}
1396 
1397 	if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
1398 		should_wake = false;
1399 	else
1400 		should_wake = true;
1401 	/* Check if packets should be tapped to BPF. */
1402 	bpf_req = bpf_peers_present(lc->ifp->if_bpf);
1403 	lagg_bpf_req = false;
1404 	lagg_ifp = NULL;
1405 	if (lc->ifp->if_type == IFT_IEEE8023ADLAG ||
1406 	    lc->ifp->if_type == IFT_INFINIBANDLAG) {
1407 		struct lagg_port *lp = lc->ifp->if_lagg;
1408 		struct lagg_softc *sc = lp->lp_softc;
1409 
1410 		lagg_ifp = sc->sc_ifp;
1411 		if (lagg_ifp != NULL)
1412 			lagg_bpf_req = bpf_peers_present(lagg_ifp->if_bpf);
1413 	}
1414 
1415 	/* Strip and compress all the incoming packets. */
1416 	can_append_old_cmp = true;
1417 	cmp = NULL;
1418 	for (pp = &le->m_head; *pp != NULL; ) {
1419 		mv_to = NULL;
1420 		if (do_bpf_strip_and_compress(inp, lc, le, pp,
1421 			&cmp, &mv_to, &should_wake, bpf_req,
1422 			lagg_bpf_req, lagg_ifp, can_append_old_cmp) == false) {
1423 			/* Advance to next mbuf. */
1424 			pp = &(*pp)->m_nextpkt;
1425 			/*
1426 			 * Once we have appended, we can't look in the pending
1427 			 * inbound packets for a compressed ack to append to.
1428 			 */
1429 			can_append_old_cmp = false;
1430 			/*
1431 			 * Once we append, we also need to stop adding to any
1432 			 * compressed ack we were remembering. A new cmp
1433 			 * ack will be required.
1434 			 */
1435 			cmp = NULL;
1436 			tcp_lro_log(tp, lc, le, NULL, 25, 0, 0, 0, 0);
1437 		} else if (mv_to != NULL) {
1438 			/* We are asked to move pp up. */
1439 			pp = &mv_to->m_nextpkt;
1440 			tcp_lro_log(tp, lc, le, NULL, 24, 0, 0, 0, 0);
1441 		} else
1442 			tcp_lro_log(tp, lc, le, NULL, 26, 0, 0, 0, 0);
1443 	}
1444 	/* Update "m_last_mbuf", if any. */
1445 	if (pp == &le->m_head)
1446 		le->m_last_mbuf = *pp;
1447 	else
1448 		le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);
1449 
1450 	/* Check if any data mbufs left. */
1451 	if (le->m_head != NULL) {
1452 		counter_u64_add(tcp_inp_lro_direct_queue, 1);
1453 		tcp_lro_log(tp, lc, le, NULL, 22, 1, inp->inp_flags2, 0, 1);
1454 		tcp_queue_pkts(inp, tp, le);
1455 	}
1456 	if (should_wake) {
1457 		/* Wakeup */
1458 		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
1459 		if ((*tp->t_fb->tfb_do_queued_segments)(tp, 0))
1460 			inp = NULL;
1461 	}
1462 	if (inp != NULL)
1463 		INP_WUNLOCK(inp);
1464 	return (0);	/* Success. */
1465 }
1466 #endif
1467 
1468 void
1469 tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
1470 {
1471 	/* Only optimise if there are multiple packets waiting. */
1472 #ifdef TCPHPTS
1473 	int error;
1474 #endif
1475 
1476 	NET_EPOCH_ASSERT();
1477 #ifdef TCPHPTS
1478 	CURVNET_SET(lc->ifp->if_vnet);
1479 	error = tcp_lro_flush_tcphpts(lc, le);
1480 	CURVNET_RESTORE();
1481 	if (error != 0) {
1482 #endif
1483 		tcp_lro_condense(lc, le);
1484 		tcp_flush_out_entry(lc, le);
1485 #ifdef TCPHPTS
1486 	}
1487 #endif
1488 	lc->lro_flushed++;
1489 	bzero(le, sizeof(*le));
1490 	LIST_INSERT_HEAD(&lc->lro_free, le, next);
1491 }
1492 
1493 #ifdef HAVE_INLINE_FLSLL
1494 #define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
1495 #else
1496 static inline uint64_t
1497 tcp_lro_msb_64(uint64_t x)
1498 {
1499 	x |= (x >> 1);
1500 	x |= (x >> 2);
1501 	x |= (x >> 4);
1502 	x |= (x >> 8);
1503 	x |= (x >> 16);
1504 	x |= (x >> 32);
1505 	return (x & ~(x >> 1));
1506 }
1507 #endif
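
/*
 * The fallback above smears the most significant set bit into all
 * lower positions and then keeps only the top bit: e.g. x = 0x58
 * becomes 0x7f after the shifts, and 0x7f & ~(0x7f >> 1) = 0x40,
 * the most significant set bit of the input.
 */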
1508 
1509 /*
1510  * The tcp_lro_sort() routine is comparable to qsort(), except it has
1511  * a worst case complexity limit of O(MIN(N,64)*N), where N is the
1512  * number of elements to sort and 64 is the number of sequence bits
1513  * available. The algorithm is bit-slicing the 64-bit sequence number,
1514  * sorting one bit at a time from the most significant bit until the
1515  * least significant one, skipping the constant bits. This is
1516  * typically called a radix sort.
1517  */
1518 static void
1519 tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
1520 {
1521 	struct lro_mbuf_sort temp;
1522 	uint64_t ones;
1523 	uint64_t zeros;
1524 	uint32_t x;
1525 	uint32_t y;
1526 
1527 repeat:
1528 	/* for small arrays insertion sort is faster */
1529 	if (size <= 12) {
1530 		for (x = 1; x < size; x++) {
1531 			temp = parray[x];
1532 			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
1533 				parray[y] = parray[y - 1];
1534 			parray[y] = temp;
1535 		}
1536 		return;
1537 	}
1538 
1539 	/* compute sequence bits which are constant */
1540 	ones = 0;
1541 	zeros = 0;
1542 	for (x = 0; x != size; x++) {
1543 		ones |= parray[x].seq;
1544 		zeros |= ~parray[x].seq;
1545 	}
1546 
1547 	/* compute bits which are not constant into "ones" */
1548 	ones &= zeros;
1549 	if (ones == 0)
1550 		return;
1551 
1552 	/* pick the most significant bit which is not constant */
1553 	ones = tcp_lro_msb_64(ones);
1554 
1555 	/*
1556 	 * Move entries having cleared sequence bits to the beginning
1557 	 * of the array:
1558 	 */
1559 	for (x = y = 0; y != size; y++) {
1560 		/* skip set bits */
1561 		if (parray[y].seq & ones)
1562 			continue;
1563 		/* swap entries */
1564 		temp = parray[x];
1565 		parray[x] = parray[y];
1566 		parray[y] = temp;
1567 		x++;
1568 	}
1569 
1570 	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));
1571 
1572 	/* sort zeros */
1573 	tcp_lro_sort(parray, x);
1574 
1575 	/* sort ones */
1576 	parray += x;
1577 	size -= x;
1578 	goto repeat;
1579 }
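
/*
 * The 64-bit sort key is expected to carry the flow hash in its
 * upper bits and a packet arrival index in the low 24 bits (note
 * the "-1ULL << 24" mask in tcp_lro_flush_all() below), so sorting
 * groups packets by stream while preserving arrival order within
 * each stream.
 */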
1580 
1581 void
1582 tcp_lro_flush_all(struct lro_ctrl *lc)
1583 {
1584 	uint64_t seq;
1585 	uint64_t nseq;
1586 	unsigned x;
1587 
1588 	NET_EPOCH_ASSERT();
1589 	/* check if no mbufs to flush */
1590 	if (lc->lro_mbuf_count == 0)
1591 		goto done;
1592 	if (lc->lro_cpu_is_set == 0) {
1593 		if (lc->lro_last_cpu == curcpu) {
1594 			lc->lro_cnt_of_same_cpu++;
1595 			/* Have we reached the threshold to declare a cpu? */
1596 			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
1597 				lc->lro_cpu_is_set = 1;
1598 		} else {
1599 			lc->lro_last_cpu = curcpu;
1600 			lc->lro_cnt_of_same_cpu = 0;
1601 		}
1602 	}
1603 	CURVNET_SET(lc->ifp->if_vnet);
1604 
1605 	/* get current time */
1606 	binuptime(&lc->lro_last_queue_time);
1607 
1608 	/* sort all mbufs according to stream */
1609 	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);
1610 
1611 	/* input data into LRO engine, stream by stream */
1612 	seq = 0;
1613 	for (x = 0; x != lc->lro_mbuf_count; x++) {
1614 		struct mbuf *mb;
1615 
1616 		/* get mbuf */
1617 		mb = lc->lro_mbuf_data[x].mb;
1618 
1619 		/* get sequence number, masking away the packet index */
1620 		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);
1621 
1622 		/* check for new stream */
1623 		if (seq != nseq) {
1624 			seq = nseq;
1625 
1626 			/* flush active streams */
1627 			tcp_lro_rx_done(lc);
1628 		}
1629 
1630 		/* add packet to LRO engine */
1631 		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
1632 			/* Flush anything we have accumulated. */
1633 			tcp_lro_flush_active(lc);
1634 			/* input packet to network layer */
1635 			(*lc->ifp->if_input)(lc->ifp, mb);
1636 			lc->lro_queued++;
1637 			lc->lro_flushed++;
1638 		}
1639 	}
1640 	CURVNET_RESTORE();
1641 done:
1642 	/* flush active streams */
1643 	tcp_lro_rx_done(lc);
1644 
1645 #ifdef TCPHPTS
1646 	tcp_run_hpts();
1647 #endif
1648 	lc->lro_mbuf_count = 0;
1649 }
1650 
1651 #ifdef TCPHPTS
1652 static void
1653 build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
1654     uint32_t *ts_ptr, uint16_t iptos)
1655 {
1656 	/*
1657 	 * Given a TCP ACK, summarize it down into the small TCP ACK
1658 	 * entry.
1659 	 */
1660 	ae->timestamp = m->m_pkthdr.rcv_tstmp;
1661 	ae->flags = 0;
1662 	if (m->m_flags & M_TSTMP_LRO)
1663 		ae->flags |= TSTMP_LRO;
1664 	else if (m->m_flags & M_TSTMP)
1665 		ae->flags |= TSTMP_HDWR;
1666 	ae->seq = ntohl(th->th_seq);
1667 	ae->ack = ntohl(th->th_ack);
1668 	ae->flags |= tcp_get_flags(th);
1669 	if (ts_ptr != NULL) {
1670 		ae->ts_value = ntohl(ts_ptr[1]);
1671 		ae->ts_echo = ntohl(ts_ptr[2]);
1672 		ae->flags |= HAS_TSTMP;
1673 	}
1674 	ae->win = ntohs(th->th_win);
1675 	ae->codepoint = iptos;
1676 }
1677 
1678 /*
1679  * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
1680  * and strip all but the IPv4/IPv6 header.
1681  */
1682 static bool
1683 do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
1684     struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
1685     bool *should_wake, bool bpf_req, bool lagg_bpf_req, struct ifnet *lagg_ifp, bool can_append_old_cmp)
1686 {
1687 	union {
1688 		void *ptr;
1689 		struct ip *ip4;
1690 		struct ip6_hdr *ip6;
1691 	} l3;
1692 	struct mbuf *m;
1693 	struct mbuf *nm;
1694 	struct tcphdr *th;
1695 	struct tcp_ackent *ack_ent;
1696 	uint32_t *ts_ptr;
1697 	int32_t n_mbuf;
1698 	bool other_opts, can_compress;
1699 	uint8_t lro_type;
1700 	uint16_t iptos;
1701 	int tcp_hdr_offset;
1702 	int idx;
1703 
1704 	/* Get current mbuf. */
1705 	m = *pp;
1706 
1707 	/* Let the BPF see the packet */
1708 	if (__predict_false(bpf_req))
1709 		ETHER_BPF_MTAP(lc->ifp, m);
1710 
1711 	if (__predict_false(lagg_bpf_req))
1712 		ETHER_BPF_MTAP(lagg_ifp, m);
1713 
1714 	tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
1715 	lro_type = le->inner.data.lro_type;
1716 	switch (lro_type) {
1717 	case LRO_TYPE_NONE:
1718 		lro_type = le->outer.data.lro_type;
1719 		switch (lro_type) {
1720 		case LRO_TYPE_IPV4_TCP:
1721 			tcp_hdr_offset -= sizeof(*le->outer.ip4);
1722 			m->m_pkthdr.lro_etype = ETHERTYPE_IP;
1723 			break;
1724 		case LRO_TYPE_IPV6_TCP:
1725 			tcp_hdr_offset -= sizeof(*le->outer.ip6);
1726 			m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
1727 			break;
1728 		default:
1729 			goto compressed;
1730 		}
1731 		break;
1732 	case LRO_TYPE_IPV4_TCP:
1733 		tcp_hdr_offset -= sizeof(*le->outer.ip4);
1734 		m->m_pkthdr.lro_etype = ETHERTYPE_IP;
1735 		break;
1736 	case LRO_TYPE_IPV6_TCP:
1737 		tcp_hdr_offset -= sizeof(*le->outer.ip6);
1738 		m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
1739 		break;
1740 	default:
1741 		goto compressed;
1742 	}
1743 
1744 	MPASS(tcp_hdr_offset >= 0);
1745 
1746 	m_adj(m, tcp_hdr_offset);
1747 	m->m_flags |= M_LRO_EHDRSTRP;
1748 	m->m_flags &= ~M_ACKCMP;
1749 	m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;
1750 
1751 	th = tcp_lro_get_th(m);
1752 
1753 	th->th_sum = 0;		/* TCP checksum is valid. */
1754 
1755 	/* Check if ACK can be compressed */
1756 	can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);
1757 
1758 	/* Now let's look at the should-wake states. */
1759 	if ((other_opts == true) &&
1760 	    ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
1761 		/*
1762 		 * If there are other options (SACK?) and the
1763 		 * tcp endpoint has not expressly told us it does
1764 		 * not care about SACKS, then we should wake up.
1765 		 */
1766 		*should_wake = true;
1767 	} else if (*should_wake == false) {
1768 		/* Wakeup override check if we are false here. */
1769 		*should_wake = tcp_lro_check_wake_status(inp);
1770 	}
1771 	/* Is the ACK compressible? */
1772 	if (can_compress == false)
1773 		goto done;
1774 	/* Does the TCP endpoint support ACK compression? */
1775 	if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
1776 		goto done;
1777 
1778 	/* Let's get the TOS/traffic class field. */
1779 	l3.ptr = mtod(m, void *);
1780 	switch (lro_type) {
1781 	case LRO_TYPE_IPV4_TCP:
1782 		iptos = l3.ip4->ip_tos;
1783 		break;
1784 	case LRO_TYPE_IPV6_TCP:
1785 		iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
1786 		break;
1787 	default:
1788 		iptos = 0;	/* Keep compiler happy. */
1789 		break;
1790 	}
1791 	/* Now let's get space, if we don't have some already. */
1792 	if (*cmp == NULL) {
1793 new_one:
1794 		nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf, can_append_old_cmp);
1795 		if (__predict_false(nm == NULL))
1796 			goto done;
1797 		*cmp = nm;
1798 		if (n_mbuf) {
1799 			/*
1800 			 * Link in the new cmp ack to our in-order place;
1801 			 * first set our cmp ack's next to where we are.
1802 			 */
1803 			nm->m_nextpkt = m;
1804 			(*pp) = nm;
1805 			/*
1806 			 * Set it up so mv_to is advanced to our
1807 			 * compressed ack. This way the caller can
1808 			 * advance pp to the right place.
1809 			 */
1810 			*mv_to = nm;
1811 			/*
1812 			 * Advance it here locally as well.
1813 			 */
1814 			pp = &nm->m_nextpkt;
1815 		}
1816 	} else {
1817 		/* We already have one that we are working on. */
1818 		nm = *cmp;
1819 		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
1820 			/* We ran out of space; use larger mbufs from now on. */
1821 			inp->inp_flags2 |= INP_MBUF_L_ACKS;
1822 			goto new_one;
1823 		}
1824 	}
1825 	MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
1826 	counter_u64_add(tcp_inp_lro_compressed, 1);
1827 	le->compressed++;
1828 	/* Append to the tcp_ackent array in the tail mbuf. */
1829 	ack_ent = mtod(nm, struct tcp_ackent *);
1830 	idx = (nm->m_len / sizeof(struct tcp_ackent));
1831 	build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);
1832 
1833 	/* Bump both the packet-header length and the mbuf length. */
1834 	nm->m_len += sizeof(struct tcp_ackent);
1835 	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
1836 compressed:
1837 	/* Advance to next mbuf before freeing. */
1838 	*pp = m->m_nextpkt;
1839 	m->m_nextpkt = NULL;
1840 	m_freem(m);
1841 	return (true);
1842 done:
1843 	counter_u64_add(tcp_uncomp_total, 1);
1844 	le->uncompressed++;
1845 	return (false);
1846 }
1847 #endif
1848 
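/*
 * Map a packet to a hash bucket: prefer the hardware-supplied flow ID,
 * else fold the parsed address and port words into a simple sum.
 */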
1849 static struct lro_head *
1850 tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
1851 {
1852 	u_long hash;
1853 
1854 	if (M_HASHTYPE_ISHASH(m)) {
1855 		hash = m->m_pkthdr.flowid;
1856 	} else {
1857 		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
1858 			hash += parser->data.raw[i];
1859 	}
1860 	return (&lc->lro_hash[hash % lc->lro_hashsz]);
1861 }
1862 
1863 static int
1864 tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
1865 {
1866 	struct lro_parser pi;	/* inner address data */
1867 	struct lro_parser po;	/* outer address data */
1868 	struct lro_parser *pa;	/* current parser for TCP stream */
1869 	struct lro_entry *le;
1870 	struct lro_head *bucket;
1871 	struct tcphdr *th;
1872 	int tcp_data_len;
1873 	int tcp_opt_len;
1874 	int error;
1875 	uint16_t tcp_data_sum;
1876 
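	/*
	 * When the host is forwarding, coalescing would change the
	 * segments sent back out, so refuse to LRO in that case.
	 */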
1877 #ifdef INET
1878 	/* Quickly decide if packet cannot be LRO'ed */
1879 	if (__predict_false(V_ipforwarding != 0))
1880 		return (TCP_LRO_CANNOT);
1881 #endif
1882 #ifdef INET6
1883 	/* Quickly decide if packet cannot be LRO'ed */
1884 	if (__predict_false(V_ip6_forwarding != 0))
1885 		return (TCP_LRO_CANNOT);
1886 #endif
1887 	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
1888 	     ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
1889 	    (m->m_pkthdr.csum_data != 0xffff)) {
1890 		/*
1891 		 * Either the checksum was not verified by hardware
1892 		 * offload or it was bad. We can't LRO such
1893 		 * a packet.
1894 		 */
1895 		counter_u64_add(tcp_bad_csums, 1);
1896 		return (TCP_LRO_CANNOT);
1897 	}
1898 	/* We expect a contiguous header [eh, ip, tcp]. */
1899 	pa = tcp_lro_parser(m, &po, &pi, true);
1900 	if (__predict_false(pa == NULL))
1901 		return (TCP_LRO_NOT_SUPPORTED);
1902 
1903 	/* We don't expect any padding. */
1904 	error = tcp_lro_trim_mbuf_chain(m, pa);
1905 	if (__predict_false(error != 0))
1906 		return (error);
1907 
1908 #ifdef INET
1909 	switch (pa->data.lro_type) {
1910 	case LRO_TYPE_IPV4_TCP:
1911 		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
1912 		if (__predict_false(error != 0))
1913 			return (error);
1914 		break;
1915 	default:
1916 		break;
1917 	}
1918 #endif
1919 	/* If the packet has no hardware or arrival timestamp, add one. */
1920 	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
1921 		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
1922 		m->m_flags |= M_TSTMP_LRO;
1923 	}
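	/*
	 * The software stamp is marked M_TSTMP_LRO so consumers can
	 * tell it apart from a hardware-generated timestamp.
	 */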
1924 
1925 	/* Get pointer to TCP header. */
1926 	th = pa->tcp;
1927 
1928 	/* Don't process SYN packets. */
1929 	if (__predict_false(tcp_get_flags(th) & TH_SYN))
1930 		return (TCP_LRO_CANNOT);
1931 
1932 	/* Get total TCP header length and compute payload length. */
1933 	tcp_opt_len = (th->th_off << 2);
1934 	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
1935 	    (uint8_t *)m->m_data) - tcp_opt_len;
1936 	tcp_opt_len -= sizeof(*th);
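	/* tcp_opt_len now counts option bytes only; tcp_data_len is the payload length. */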
1937 
1938 	/* Don't process invalid TCP headers. */
1939 	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
1940 		return (TCP_LRO_CANNOT);
1941 
1942 	/* Compute TCP data only checksum. */
1943 	if (tcp_data_len == 0)
1944 		tcp_data_sum = 0;	/* no data, no checksum */
1945 	else if (__predict_false(csum != 0))
1946 		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
1947 	else
1948 		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);
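	/*
	 * Keeping a payload-only checksum lets later merging fold the
	 * checksums of appended segments together without re-reading
	 * the data.
	 */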
1949 
1950 	/* Save TCP info in mbuf. */
1951 	m->m_nextpkt = NULL;
1952 	m->m_pkthdr.rcvif = lc->ifp;
1953 	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
1954 	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
1955 	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
1956 	m->m_pkthdr.lro_nsegs = 1;
1957 
1958 	/* Get hash bucket. */
1959 	if (!use_hash) {
1960 		bucket = &lc->lro_hash[0];
1961 	} else {
1962 		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
1963 	}
1964 
1965 	/* Try to find a matching previous segment. */
1966 	LIST_FOREACH(le, bucket, hash_next) {
1967 		/* Compare addresses and ports. */
1968 		if (lro_address_compare(&po.data, &le->outer.data) == false ||
1969 		    lro_address_compare(&pi.data, &le->inner.data) == false)
1970 			continue;
1971 
1972 		/* If this is a pure ACK older than the one we hold, drop it. */
1973 		if (tcp_data_len == 0 &&
1974 		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
1975 			m_freem(m);
1976 			return (0);
1977 		}
1978 
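		/*
		 * Just queue the segment on the entry's mbuf chain;
		 * the actual coalescing happens at flush time.
		 */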
1979 		/* Link "m" after the current tail. */
1980 		le->m_last_mbuf->m_nextpkt = m;
1981 		/* Now set the tail to "m". */
1982 		le->m_last_mbuf = m;
1983 		return (0);
1984 	}
1985 
1986 	/* Try to find an empty slot. */
1987 	if (LIST_EMPTY(&lc->lro_free))
1988 		return (TCP_LRO_NO_ENTRIES);
1989 
1990 	/* Start a new segment chain. */
1991 	le = LIST_FIRST(&lc->lro_free);
1992 	LIST_REMOVE(le, next);
1993 	tcp_lro_active_insert(lc, bucket, le);
1994 
1995 	/* Make sure the headers are set. */
1996 	le->inner = pi;
1997 	le->outer = po;
1998 
1999 	/* Store time this entry was allocated. */
2000 	le->alloc_time = lc->lro_last_queue_time;
2001 
2002 	tcp_set_entry_to_mbuf(lc, le, m, th);
2003 
2004 	/* Now set the tail to "m". */
2005 	le->m_last_mbuf = m;
2006 
2007 	return (0);
2008 }
2009 
2010 int
2011 tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
2012 {
2013 	int error;
2014 
2015 	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
2016 	     ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
2017 	    (m->m_pkthdr.csum_data != 0xffff)) {
2018 		/*
2019 		 * Either the checksum was not verified by hardware
2020 		 * offload or it was bad. We can't LRO such
2021 		 * a packet.
2022 		 */
2023 		counter_u64_add(tcp_bad_csums, 1);
2024 		return (TCP_LRO_CANNOT);
2025 	}
2026 	/* Sample the arrival time; it is used for timestamps below. */
2027 	binuptime(&lc->lro_last_queue_time);
2028 	CURVNET_SET(lc->ifp->if_vnet);
2029 	error = tcp_lro_rx_common(lc, m, csum, true);
2030 	if (__predict_false(error != 0)) {
2031 		/*
2032 		 * Flush anything we have accumulated
2033 		 * ahead of this packet that can't
2034 		 * be LRO'd. This preserves order.
2035 		 */
2036 		tcp_lro_flush_active(lc);
2037 	}
2038 	CURVNET_RESTORE();
2039 
2040 	return (error);
2041 }
2042 
2043 void
2044 tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
2045 {
2046 	NET_EPOCH_ASSERT();
2047 	/* sanity checks */
2048 	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
2049 	    lc->lro_mbuf_max == 0)) {
2050 		/* packet drop */
2051 		m_freem(mb);
2052 		return;
2053 	}
2054 
2055 	/* Check if the interface has LRO disabled. */
2056 	if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
2057 		/* input packet to network layer */
2058 		(*lc->ifp->if_input) (lc->ifp, mb);
2059 		return;
2060 	}
2061 
2062 	/* If the packet has no hardware or arrival timestamp, add one. */
2063 	if ((tcplro_stacks_wanting_mbufq > 0) &&
2064 	    (tcp_less_accurate_lro_ts == 0) &&
2065 	    ((mb->m_flags & M_TSTMP) == 0)) {
2066 		/* Add an LRO timestamp since there is no hardware one. */
2067 		binuptime(&lc->lro_last_queue_time);
2068 		mb->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
2069 		mb->m_flags |= M_TSTMP_LRO;
2070 	}
2071 
2072 	/* create sequence number */
2073 	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
2074 	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
2075 	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
2076 	    ((uint64_t)lc->lro_mbuf_count);
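	/*
	 * The 64-bit key sorts as: hash type (top 8 bits), flow ID
	 * (next 32 bits), then the 24-bit arrival index, grouping
	 * packets by flow while preserving per-flow arrival order.
	 */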
2077 
2078 	/* enter mbuf */
2079 	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;
2080 
2081 	/* flush if array is full */
2082 	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
2083 		tcp_lro_flush_all(lc);
2084 }
2085 
2086 /* end */
2087