xref: /freebsd/sys/dev/iavf/iavf_txrx_iflib.c (revision 71625ec9)
1ca853deeSEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */
2ca853deeSEric Joyner /*  Copyright (c) 2021, Intel Corporation
3ca853deeSEric Joyner  *  All rights reserved.
4ca853deeSEric Joyner  *
5ca853deeSEric Joyner  *  Redistribution and use in source and binary forms, with or without
6ca853deeSEric Joyner  *  modification, are permitted provided that the following conditions are met:
7ca853deeSEric Joyner  *
8ca853deeSEric Joyner  *   1. Redistributions of source code must retain the above copyright notice,
9ca853deeSEric Joyner  *      this list of conditions and the following disclaimer.
10ca853deeSEric Joyner  *
11ca853deeSEric Joyner  *   2. Redistributions in binary form must reproduce the above copyright
12ca853deeSEric Joyner  *      notice, this list of conditions and the following disclaimer in the
13ca853deeSEric Joyner  *      documentation and/or other materials provided with the distribution.
14ca853deeSEric Joyner  *
15ca853deeSEric Joyner  *   3. Neither the name of the Intel Corporation nor the names of its
16ca853deeSEric Joyner  *      contributors may be used to endorse or promote products derived from
17ca853deeSEric Joyner  *      this software without specific prior written permission.
18ca853deeSEric Joyner  *
19ca853deeSEric Joyner  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20ca853deeSEric Joyner  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21ca853deeSEric Joyner  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22ca853deeSEric Joyner  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23ca853deeSEric Joyner  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24ca853deeSEric Joyner  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25ca853deeSEric Joyner  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26ca853deeSEric Joyner  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27ca853deeSEric Joyner  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28ca853deeSEric Joyner  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29ca853deeSEric Joyner  *  POSSIBILITY OF SUCH DAMAGE.
30ca853deeSEric Joyner  */
31ca853deeSEric Joyner 
32ca853deeSEric Joyner /**
33ca853deeSEric Joyner  * @file iavf_txrx_iflib.c
34ca853deeSEric Joyner  * @brief Tx/Rx hotpath implementation for the iflib driver
35ca853deeSEric Joyner  *
36ca853deeSEric Joyner  * Contains functions used to implement the Tx and Rx hotpaths of the iflib
37ca853deeSEric Joyner  * driver implementation.
38ca853deeSEric Joyner  */
39ca853deeSEric Joyner #include "iavf_iflib.h"
40ca853deeSEric Joyner #include "iavf_txrx_common.h"
41ca853deeSEric Joyner 
42ca853deeSEric Joyner #ifdef RSS
43ca853deeSEric Joyner #include <net/rss_config.h>
44ca853deeSEric Joyner #endif
45ca853deeSEric Joyner 
46ca853deeSEric Joyner /* Local Prototypes */
47ca853deeSEric Joyner static void	iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);
48ca853deeSEric Joyner 
49ca853deeSEric Joyner static int	iavf_isc_txd_encap(void *arg, if_pkt_info_t pi);
50ca853deeSEric Joyner static void	iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
51ca853deeSEric Joyner static int	iavf_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear);
52ca853deeSEric Joyner static int	iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear);
53ca853deeSEric Joyner 
54ca853deeSEric Joyner static void	iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru);
55ca853deeSEric Joyner static void	iavf_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
56ca853deeSEric Joyner 				  qidx_t pidx);
57ca853deeSEric Joyner static int	iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
58ca853deeSEric Joyner 				      qidx_t budget);
59ca853deeSEric Joyner static int	iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
60ca853deeSEric Joyner 
/**
 * @var iavf_txrx_hwb
 * @brief iflib Tx/Rx operations for head write back
 *
 * iflib ops structure for when operating the device in head write back mode.
 * The entries are positional; only the Tx credits-update callback differs
 * from the descriptor write back variant (iavf_txrx_dwb), and the final
 * legacy-interrupt entry is unused by this driver.
 */
struct if_txrx iavf_txrx_hwb = {
	iavf_isc_txd_encap,		/* Tx descriptor encapsulation */
	iavf_isc_txd_flush,		/* Tx tail register update */
	iavf_isc_txd_credits_update_hwb, /* Tx cleanup via head write back */
	iavf_isc_rxd_available,		/* count ready Rx descriptors */
	iavf_isc_rxd_pkt_get,		/* receive a completed packet */
	iavf_isc_rxd_refill,		/* re-arm Rx descriptors */
	iavf_isc_rxd_flush,		/* Rx tail register update */
	NULL				/* legacy interrupt handler: unused */
};
77ca853deeSEric Joyner 
/**
 * @var iavf_txrx_dwb
 * @brief iflib Tx/Rx operations for descriptor write back
 *
 * iflib ops structure for when operating the device in descriptor write back
 * mode.  Identical to iavf_txrx_hwb except that Tx cleanup polls the DD bit
 * in each descriptor rather than the head write back location.
 */
struct if_txrx iavf_txrx_dwb = {
	iavf_isc_txd_encap,		/* Tx descriptor encapsulation */
	iavf_isc_txd_flush,		/* Tx tail register update */
	iavf_isc_txd_credits_update_dwb, /* Tx cleanup via descriptor DD bit */
	iavf_isc_rxd_available,		/* count ready Rx descriptors */
	iavf_isc_rxd_pkt_get,		/* receive a completed packet */
	iavf_isc_rxd_refill,		/* re-arm Rx descriptors */
	iavf_isc_rxd_flush,		/* Rx tail register update */
	NULL				/* legacy interrupt handler: unused */
};
95ca853deeSEric Joyner 
96ca853deeSEric Joyner /**
97ca853deeSEric Joyner  * iavf_is_tx_desc_done - Check if a Tx descriptor is ready
98ca853deeSEric Joyner  * @txr: the Tx ring to check in
99ca853deeSEric Joyner  * @idx: ring index to check
100ca853deeSEric Joyner  *
101ca853deeSEric Joyner  * @returns true if the descriptor has been written back by hardware, and
102ca853deeSEric Joyner  * false otherwise.
103ca853deeSEric Joyner  */
104ca853deeSEric Joyner static bool
iavf_is_tx_desc_done(struct tx_ring * txr,int idx)105ca853deeSEric Joyner iavf_is_tx_desc_done(struct tx_ring *txr, int idx)
106ca853deeSEric Joyner {
107ca853deeSEric Joyner 	return (((txr->tx_base[idx].cmd_type_offset_bsz >> IAVF_TXD_QW1_DTYPE_SHIFT)
108ca853deeSEric Joyner 	    & IAVF_TXD_QW1_DTYPE_MASK) == IAVF_TX_DESC_DTYPE_DESC_DONE);
109ca853deeSEric Joyner }
110ca853deeSEric Joyner 
111ca853deeSEric Joyner 
/**
 * iavf_tso_detect_sparse - detect TSO packets with too many segments
 * @segs: packet segments array
 * @nsegs: number of packet segments
 * @pi: packet information
 *
 * Hardware only transmits packets with a maximum of 8 descriptors. For TSO
 * packets, hardware needs to be able to build the split packets using 8 or
 * fewer descriptors. Additionally, the header must be contained within at
 * most 3 descriptors.
 *
 * To verify this, we walk the headers to find out how many descriptors the
 * headers require (usually 1). Then we ensure that, for each TSO segment, its
 * data plus the headers are contained within 8 or fewer descriptors.
 *
 * @returns zero if the packet is valid, one otherwise.
 */
static int
iavf_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi)
{
	int	count, curseg, i, hlen, segsz, seglen, tsolen;

	/* A chain short enough to fit the hardware limit is always OK. */
	if (nsegs <= IAVF_MAX_TX_SEGS-2)
		return (0);
	segsz = pi->ipi_tso_segsz;
	curseg = count = 0;

	/* Total bytes of L2+L3+L4 headers preceding the TSO payload. */
	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - hlen;

	/*
	 * Phase 1: consume the header bytes, counting how many DMA
	 * segments (i.e. descriptors) they span.  curseg tracks the
	 * unconsumed remainder of segs[i].
	 */
	i = 0;
	curseg = segs[0].ds_len;
	while (hlen > 0) {
		count++;
		if (count > IAVF_MAX_TX_SEGS - 2)
			return (1);
		if (curseg == 0) {
			i++;
			/* Ran out of segments before the headers ended. */
			if (__predict_false(i == nsegs))
				return (1);

			curseg = segs[i].ds_len;
		}
		seglen = min(curseg, hlen);
		curseg -= seglen;
		hlen -= seglen;
	}
	/*
	 * Phase 2: walk the payload one MSS-sized chunk at a time,
	 * checking that each resulting wire packet fits in the
	 * descriptor budget.
	 */
	while (tsolen > 0) {
		segsz = pi->ipi_tso_segsz;
		while (segsz > 0 && tsolen != 0) {
			count++;
			if (count > IAVF_MAX_TX_SEGS - 2) {
				return (1);
			}
			if (curseg == 0) {
				i++;
				if (__predict_false(i == nsegs)) {
					return (1);
				}
				curseg = segs[i].ds_len;
			}
			seglen = min(curseg, segsz);
			segsz -= seglen;
			curseg -= seglen;
			tsolen -= seglen;
		}
		/* Each TSO-produced packet gets a fresh descriptor count. */
		count = 0;
	}

	return (0);
}
183ca853deeSEric Joyner 
184ca853deeSEric Joyner /**
185ca853deeSEric Joyner  * iavf_tx_setup_offload - Setup Tx offload parameters
186ca853deeSEric Joyner  * @que: pointer to the Tx queue
187ca853deeSEric Joyner  * @pi: Tx packet info
188ca853deeSEric Joyner  * @cmd: pointer to command descriptor value
189ca853deeSEric Joyner  * @off: pointer to offset descriptor value
190ca853deeSEric Joyner  *
191ca853deeSEric Joyner  * Based on packet type and Tx offloads requested, sets up the command and
192ca853deeSEric Joyner  * offset values for a Tx descriptor to enable the requested offloads.
193ca853deeSEric Joyner  */
194ca853deeSEric Joyner static void
iavf_tx_setup_offload(struct iavf_tx_queue * que __unused,if_pkt_info_t pi,u32 * cmd,u32 * off)195ca853deeSEric Joyner iavf_tx_setup_offload(struct iavf_tx_queue *que __unused,
196ca853deeSEric Joyner     if_pkt_info_t pi, u32 *cmd, u32 *off)
197ca853deeSEric Joyner {
198ca853deeSEric Joyner 	switch (pi->ipi_etype) {
199ca853deeSEric Joyner #ifdef INET
200ca853deeSEric Joyner 		case ETHERTYPE_IP:
201ca853deeSEric Joyner 			if (pi->ipi_csum_flags & IAVF_CSUM_IPV4)
202ca853deeSEric Joyner 				*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
203ca853deeSEric Joyner 			else
204ca853deeSEric Joyner 				*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
205ca853deeSEric Joyner 			break;
206ca853deeSEric Joyner #endif
207ca853deeSEric Joyner #ifdef INET6
208ca853deeSEric Joyner 		case ETHERTYPE_IPV6:
209ca853deeSEric Joyner 			*cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
210ca853deeSEric Joyner 			break;
211ca853deeSEric Joyner #endif
212ca853deeSEric Joyner 		default:
213ca853deeSEric Joyner 			break;
214ca853deeSEric Joyner 	}
215ca853deeSEric Joyner 
216ca853deeSEric Joyner 	*off |= (pi->ipi_ehdrlen >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
217ca853deeSEric Joyner 	*off |= (pi->ipi_ip_hlen >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
218ca853deeSEric Joyner 
219ca853deeSEric Joyner 	switch (pi->ipi_ipproto) {
220ca853deeSEric Joyner 		case IPPROTO_TCP:
221ca853deeSEric Joyner 			if (pi->ipi_csum_flags & IAVF_CSUM_TCP) {
222ca853deeSEric Joyner 				*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
223ca853deeSEric Joyner 				*off |= (pi->ipi_tcp_hlen >> 2) <<
224ca853deeSEric Joyner 				    IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
225ca853deeSEric Joyner 				/* Check for NO_HEAD MDD event */
226ca853deeSEric Joyner 				MPASS(pi->ipi_tcp_hlen != 0);
227ca853deeSEric Joyner 			}
228ca853deeSEric Joyner 			break;
229ca853deeSEric Joyner 		case IPPROTO_UDP:
230ca853deeSEric Joyner 			if (pi->ipi_csum_flags & IAVF_CSUM_UDP) {
231ca853deeSEric Joyner 				*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
232ca853deeSEric Joyner 				*off |= (sizeof(struct udphdr) >> 2) <<
233ca853deeSEric Joyner 				    IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
234ca853deeSEric Joyner 			}
235ca853deeSEric Joyner 			break;
236ca853deeSEric Joyner 		case IPPROTO_SCTP:
237ca853deeSEric Joyner 			if (pi->ipi_csum_flags & IAVF_CSUM_SCTP) {
238ca853deeSEric Joyner 				*cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
239ca853deeSEric Joyner 				*off |= (sizeof(struct sctphdr) >> 2) <<
240ca853deeSEric Joyner 				    IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
241ca853deeSEric Joyner 			}
242ca853deeSEric Joyner 			/* Fall Thru */
243ca853deeSEric Joyner 		default:
244ca853deeSEric Joyner 			break;
245ca853deeSEric Joyner 	}
246ca853deeSEric Joyner }
247ca853deeSEric Joyner 
/**
 * iavf_tso_setup - Setup TSO context descriptor
 * @txr: the Tx ring to process
 * @pi: packet info structure
 *
 * Enable hardware segmentation offload (TSO) for a given packet by creating
 * a context descriptor with the necessary details for offloading.
 *
 * @returns the new ring index to use for the data descriptor.
 */
static int
iavf_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
{
	if_softc_ctx_t			scctx;
	struct iavf_tx_context_desc	*TXD;
	u32				cmd, mss, type, tsolen;
	int				idx, total_hdr_len;
	u64				type_cmd_tso_mss;

	/* The context descriptor occupies the slot at the current pidx. */
	idx = pi->ipi_pidx;
	TXD = (struct iavf_tx_context_desc *) &txr->tx_base[idx];
	total_hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - total_hdr_len;
	scctx = txr->que->vsi->shared;

	type = IAVF_TX_DESC_DTYPE_CONTEXT;
	cmd = IAVF_TX_CTX_DESC_TSO;
	/*
	 * TSO MSS must not be less than 64; this prevents a
	 * BAD_LSO_MSS MDD event when the MSS is too small.
	 */
	if (pi->ipi_tso_segsz < IAVF_MIN_TSO_MSS) {
		txr->mss_too_small++;
		pi->ipi_tso_segsz = IAVF_MIN_TSO_MSS;
	}
	mss = pi->ipi_tso_segsz;

	/* Check for BAD_LSO_MSS MDD event (mss too large) */
	MPASS(mss <= IAVF_MAX_TSO_MSS);
	/* Check for NO_HEAD MDD event (header lengths are 0) */
	MPASS(pi->ipi_ehdrlen != 0);
	MPASS(pi->ipi_ip_hlen != 0);
	/* Partial check for BAD_LSO_LEN MDD event */
	MPASS(tsolen != 0);
	/* Partial check for WRONG_SIZE MDD event (during TSO) */
	MPASS(total_hdr_len + mss <= IAVF_MAX_FRAME);

	/* Pack type, command, TSO length and MSS into the QW1 layout. */
	type_cmd_tso_mss = ((u64)type << IAVF_TXD_CTX_QW1_DTYPE_SHIFT) |
	    ((u64)cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
	    ((u64)tsolen << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	    ((u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	/* No tunneling offloads are configured here. */
	TXD->tunneling_params = htole32(0);
	txr->que->tso++;

	/* Advance past the context descriptor, wrapping at ring size. */
	return ((idx + 1) & (scctx->isc_ntxd[0]-1));
}
306ca853deeSEric Joyner 
/* Command bits set on the last descriptor of every packet: end-of-packet
 * plus report-status (request descriptor write back on completion). */
#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)

/**
 * iavf_isc_txd_encap - Encapsulate a Tx packet into descriptors
 * @arg: void pointer to the VSI structure
 * @pi: packet info to encapsulate
 *
 * This routine maps the mbufs to tx descriptors, allowing the
 * TX engine to transmit the packets.
 *
 * @returns 0 on success, positive on failure
 */
static int
iavf_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct iavf_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct iavf_tx_queue	*que = &vsi->tx_queues[pi->ipi_qsidx];
	struct tx_ring		*txr = &que->txr;
	int			nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct iavf_tx_desc	*txd = NULL;
	int			i, j, mask, pidx_last;
	u32			cmd, off, tx_intr;

	/* Reject runt frames before touching the ring. */
	if (__predict_false(pi->ipi_len < IAVF_MIN_FRAME)) {
		que->pkt_too_small++;
		return (EINVAL);
	}

	cmd = off = 0;
	i = pi->ipi_pidx;

	tx_intr = (pi->ipi_flags & IPI_TX_INTR);

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			/* Prevent MAX_BUFF MDD event (for TSO) */
			if (iavf_tso_detect_sparse(segs, nsegs, pi))
				return (EFBIG);
			/* The context descriptor consumed one slot. */
			i = iavf_tso_setup(txr, pi);
		}
		iavf_tx_setup_offload(que, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IAVF_TX_DESC_CMD_IL2TAG1;

	/* Always insert the L2 CRC. */
	cmd |= IAVF_TX_DESC_CMD_ICRC;
	mask = scctx->isc_ntxd[0] - 1;
	/* Check for WRONG_SIZE MDD event */
	MPASS(pi->ipi_len >= IAVF_MIN_FRAME);
#ifdef INVARIANTS
	if (!(pi->ipi_csum_flags & CSUM_TSO))
		MPASS(pi->ipi_len <= IAVF_MAX_FRAME);
#endif
	/* One data descriptor per DMA segment. */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		/* Check for ZERO_BSIZE MDD event */
		MPASS(seglen != 0);

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(IAVF_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << IAVF_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << IAVF_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)htole16(pi->ipi_vtag) << IAVF_TXD_QW1_L2TAG1_SHIFT));

		txr->tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IAVF_TXD_CMD << IAVF_TXD_QW1_CMD_SHIFT));
	/* Add to report status array (if using TX interrupts) */
	if (!vsi->enable_head_writeback && tx_intr) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & mask;
		/* The rsq ring must never fill completely. */
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}
	/* Tell iflib where the next free descriptor is. */
	pi->ipi_new_pidx = i;

	++txr->tx_packets;
	return (0);
}
399ca853deeSEric Joyner 
400ca853deeSEric Joyner /**
401ca853deeSEric Joyner  * iavf_isc_txd_flush - Flush Tx ring
402ca853deeSEric Joyner  * @arg: void pointer to the VSI
403ca853deeSEric Joyner  * @txqid: the Tx queue to flush
404ca853deeSEric Joyner  * @pidx: the ring index to flush to
405ca853deeSEric Joyner  *
406ca853deeSEric Joyner  * Advance the Transmit Descriptor Tail (Tdt), this tells the
407ca853deeSEric Joyner  * hardware that this frame is available to transmit.
408ca853deeSEric Joyner  */
409ca853deeSEric Joyner static void
iavf_isc_txd_flush(void * arg,uint16_t txqid,qidx_t pidx)410ca853deeSEric Joyner iavf_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
411ca853deeSEric Joyner {
412ca853deeSEric Joyner 	struct iavf_vsi *vsi = arg;
413ca853deeSEric Joyner 	struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
414ca853deeSEric Joyner 
415ca853deeSEric Joyner 	/* Check for ENDLESS_TX MDD event */
416ca853deeSEric Joyner 	MPASS(pidx < vsi->shared->isc_ntxd[0]);
417ca853deeSEric Joyner 	wr32(vsi->hw, txr->tail, pidx);
418ca853deeSEric Joyner }
419ca853deeSEric Joyner 
420ca853deeSEric Joyner /**
421ca853deeSEric Joyner  * iavf_init_tx_ring - Initialize queue Tx ring
422ca853deeSEric Joyner  * @vsi: pointer to the VSI
423ca853deeSEric Joyner  * @que: pointer to queue to initialize
424ca853deeSEric Joyner  *
425ca853deeSEric Joyner  * (Re)Initialize a queue transmit ring by clearing its memory.
426ca853deeSEric Joyner  */
427ca853deeSEric Joyner void
iavf_init_tx_ring(struct iavf_vsi * vsi,struct iavf_tx_queue * que)428ca853deeSEric Joyner iavf_init_tx_ring(struct iavf_vsi *vsi, struct iavf_tx_queue *que)
429ca853deeSEric Joyner {
430ca853deeSEric Joyner 	struct tx_ring *txr = &que->txr;
431ca853deeSEric Joyner 
432ca853deeSEric Joyner 	/* Clear the old ring contents */
433ca853deeSEric Joyner 	bzero((void *)txr->tx_base,
434ca853deeSEric Joyner 	      (sizeof(struct iavf_tx_desc)) *
435ca853deeSEric Joyner 	      (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));
436ca853deeSEric Joyner 
437ca853deeSEric Joyner 	wr32(vsi->hw, txr->tail, 0);
438ca853deeSEric Joyner }
439ca853deeSEric Joyner 
440ca853deeSEric Joyner /**
441ca853deeSEric Joyner  * iavf_get_tx_head - Get the index of the head of a ring
442ca853deeSEric Joyner  * @que: queue to read
443ca853deeSEric Joyner  *
444ca853deeSEric Joyner  * Retrieve the value from the location the HW records its HEAD index
445ca853deeSEric Joyner  *
446ca853deeSEric Joyner  * @returns the index of the HW head of the Tx queue
447ca853deeSEric Joyner  */
448ca853deeSEric Joyner static inline u32
iavf_get_tx_head(struct iavf_tx_queue * que)449ca853deeSEric Joyner iavf_get_tx_head(struct iavf_tx_queue *que)
450ca853deeSEric Joyner {
451ca853deeSEric Joyner 	if_softc_ctx_t          scctx = que->vsi->shared;
452ca853deeSEric Joyner 	struct tx_ring  *txr = &que->txr;
453ca853deeSEric Joyner 	void *head = &txr->tx_base[scctx->isc_ntxd[0]];
454ca853deeSEric Joyner 
455ca853deeSEric Joyner 	return LE32_TO_CPU(*(volatile __le32 *)head);
456ca853deeSEric Joyner }
457ca853deeSEric Joyner 
458ca853deeSEric Joyner /**
459ca853deeSEric Joyner  * iavf_isc_txd_credits_update_hwb - Update Tx ring credits
460ca853deeSEric Joyner  * @arg: void pointer to the VSI
461ca853deeSEric Joyner  * @qid: the queue id to update
462ca853deeSEric Joyner  * @clear: whether to update or only report current status
463ca853deeSEric Joyner  *
464ca853deeSEric Joyner  * Checks the number of packets in the queue that could be cleaned up.
465ca853deeSEric Joyner  *
466ca853deeSEric Joyner  * if clear is true, the iflib stack has cleaned the packets and is
467ca853deeSEric Joyner  * notifying the driver to update its processed ring pointer.
468ca853deeSEric Joyner  *
469ca853deeSEric Joyner  * @returns the number of packets in the ring that can be cleaned.
470ca853deeSEric Joyner  *
471ca853deeSEric Joyner  * @remark this function is intended for the head write back mode.
472ca853deeSEric Joyner  */
473ca853deeSEric Joyner static int
iavf_isc_txd_credits_update_hwb(void * arg,uint16_t qid,bool clear)474ca853deeSEric Joyner iavf_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear)
475ca853deeSEric Joyner {
476ca853deeSEric Joyner 	struct iavf_vsi          *vsi = arg;
477ca853deeSEric Joyner 	if_softc_ctx_t          scctx = vsi->shared;
478ca853deeSEric Joyner 	struct iavf_tx_queue     *que = &vsi->tx_queues[qid];
479ca853deeSEric Joyner 	struct tx_ring		*txr = &que->txr;
480ca853deeSEric Joyner 	int			 head, credits;
481ca853deeSEric Joyner 
482ca853deeSEric Joyner 	/* Get the Head WB value */
483ca853deeSEric Joyner 	head = iavf_get_tx_head(que);
484ca853deeSEric Joyner 
485ca853deeSEric Joyner 	credits = head - txr->tx_cidx_processed;
486ca853deeSEric Joyner 	if (credits < 0)
487ca853deeSEric Joyner 		credits += scctx->isc_ntxd[0];
488ca853deeSEric Joyner 	if (clear)
489ca853deeSEric Joyner 		txr->tx_cidx_processed = head;
490ca853deeSEric Joyner 
491ca853deeSEric Joyner 	return (credits);
492ca853deeSEric Joyner }
493ca853deeSEric Joyner 
/**
 * iavf_isc_txd_credits_update_dwb - Update Tx ring credits
 * @arg: void pointer to the VSI
 * @txqid: the queue id to update
 * @clear: whether to update or only report current status
 *
 * Checks the number of packets in the queue that could be cleaned up.
 *
 * if clear is true, the iflib stack has cleaned the packets and is
 * notifying the driver to update its processed ring pointer.
 *
 * @returns the number of packets in the ring that can be cleaned.
 *
 * @remark this function is intended for the descriptor write back mode.
 */
static int
iavf_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
{
	struct iavf_vsi *vsi = arg;
	struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
	if_softc_ctx_t scctx = vsi->shared;
	struct tx_ring *txr = &tx_que->txr;

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	/* An empty report-status queue means nothing is pending cleanup. */
	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = iavf_is_tx_desc_done(txr, cur);

	if (!is_done)
		return (0);

	/* If clear is false just let caller know that there
	 * are descriptors to reclaim */
	if (!clear)
		return (1);

	/*
	 * Walk the report-status queue, accumulating the wrap-adjusted
	 * distance between consecutive completed descriptors until we
	 * hit one hardware has not finished, or the queue drains.
	 */
	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = iavf_is_tx_desc_done(txr, cur);
	} while (is_done);

	/* Record how far cleanup progressed for the next invocation. */
	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
}
560ca853deeSEric Joyner 
561ca853deeSEric Joyner /**
562ca853deeSEric Joyner  * iavf_isc_rxd_refill - Prepare descriptors for re-use
563ca853deeSEric Joyner  * @arg: void pointer to the VSI
564ca853deeSEric Joyner  * @iru: the Rx descriptor update structure
565ca853deeSEric Joyner  *
566ca853deeSEric Joyner  * Update Rx descriptors for a given queue so that they can be re-used by
567ca853deeSEric Joyner  * hardware for future packets.
568ca853deeSEric Joyner  */
569ca853deeSEric Joyner static void
iavf_isc_rxd_refill(void * arg,if_rxd_update_t iru)570ca853deeSEric Joyner iavf_isc_rxd_refill(void *arg, if_rxd_update_t iru)
571ca853deeSEric Joyner {
572ca853deeSEric Joyner 	struct iavf_vsi *vsi = arg;
573ca853deeSEric Joyner 	if_softc_ctx_t scctx = vsi->shared;
574ca853deeSEric Joyner 	struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
575ca853deeSEric Joyner 	uint64_t *paddrs;
576ca853deeSEric Joyner 	uint16_t next_pidx, pidx;
577ca853deeSEric Joyner 	uint16_t count;
578ca853deeSEric Joyner 	int i;
579ca853deeSEric Joyner 
580ca853deeSEric Joyner 	paddrs = iru->iru_paddrs;
581ca853deeSEric Joyner 	pidx = iru->iru_pidx;
582ca853deeSEric Joyner 	count = iru->iru_count;
583ca853deeSEric Joyner 
584ca853deeSEric Joyner 	for (i = 0, next_pidx = pidx; i < count; i++) {
585ca853deeSEric Joyner 		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
586ca853deeSEric Joyner 		if (++next_pidx == scctx->isc_nrxd[0])
587ca853deeSEric Joyner 			next_pidx = 0;
588ca853deeSEric Joyner 	}
589ca853deeSEric Joyner }
590ca853deeSEric Joyner 
591ca853deeSEric Joyner /**
592ca853deeSEric Joyner  * iavf_isc_rxd_flush - Notify hardware of new Rx descriptors
593ca853deeSEric Joyner  * @arg: void pointer to the VSI
594ca853deeSEric Joyner  * @rxqid: Rx queue to update
595ca853deeSEric Joyner  * @flid: unused parameter
596ca853deeSEric Joyner  * @pidx: ring index to update to
597ca853deeSEric Joyner  *
598ca853deeSEric Joyner  * Updates the tail pointer of the Rx ring, notifying hardware of new
599ca853deeSEric Joyner  * descriptors available for receiving packets.
600ca853deeSEric Joyner  */
601ca853deeSEric Joyner static void
iavf_isc_rxd_flush(void * arg,uint16_t rxqid,uint8_t flid __unused,qidx_t pidx)602ca853deeSEric Joyner iavf_isc_rxd_flush(void * arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
603ca853deeSEric Joyner {
604ca853deeSEric Joyner 	struct iavf_vsi		*vsi = arg;
605ca853deeSEric Joyner 	struct rx_ring		*rxr = &vsi->rx_queues[rxqid].rxr;
606ca853deeSEric Joyner 
607ca853deeSEric Joyner 	wr32(vsi->hw, rxr->tail, pidx);
608ca853deeSEric Joyner }
609ca853deeSEric Joyner 
610ca853deeSEric Joyner /**
611ca853deeSEric Joyner  * iavf_isc_rxd_available - Calculate number of available Rx descriptors
612ca853deeSEric Joyner  * @arg: void pointer to the VSI
613ca853deeSEric Joyner  * @rxqid: Rx queue to check
614ca853deeSEric Joyner  * @idx: starting index to check from
615ca853deeSEric Joyner  * @budget: maximum Rx budget
616ca853deeSEric Joyner  *
617ca853deeSEric Joyner  * Determines how many packets are ready to be processed in the Rx queue, up
618ca853deeSEric Joyner  * to the specified budget.
619ca853deeSEric Joyner  *
620ca853deeSEric Joyner  * @returns the number of packets ready to be processed, up to the budget.
621ca853deeSEric Joyner  */
622ca853deeSEric Joyner static int
iavf_isc_rxd_available(void * arg,uint16_t rxqid,qidx_t idx,qidx_t budget)623ca853deeSEric Joyner iavf_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
624ca853deeSEric Joyner {
625ca853deeSEric Joyner 	struct iavf_vsi *vsi = arg;
626ca853deeSEric Joyner 	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
627ca853deeSEric Joyner 	union iavf_rx_desc *rxd;
628ca853deeSEric Joyner 	u64 qword;
629ca853deeSEric Joyner 	uint32_t status;
630ca853deeSEric Joyner 	int cnt, i, nrxd;
631ca853deeSEric Joyner 
632ca853deeSEric Joyner 	nrxd = vsi->shared->isc_nrxd[0];
633ca853deeSEric Joyner 
634ca853deeSEric Joyner 	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
635ca853deeSEric Joyner 		rxd = &rxr->rx_base[i];
636ca853deeSEric Joyner 		qword = le64toh(rxd->wb.qword1.status_error_len);
637ca853deeSEric Joyner 		status = (qword & IAVF_RXD_QW1_STATUS_MASK)
638ca853deeSEric Joyner 			>> IAVF_RXD_QW1_STATUS_SHIFT;
639ca853deeSEric Joyner 
640ca853deeSEric Joyner 		if ((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) == 0)
641ca853deeSEric Joyner 			break;
642ca853deeSEric Joyner 		if (++i == nrxd)
643ca853deeSEric Joyner 			i = 0;
644ca853deeSEric Joyner 		if (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))
645ca853deeSEric Joyner 			cnt++;
646ca853deeSEric Joyner 	}
647ca853deeSEric Joyner 
648ca853deeSEric Joyner 	return (cnt);
649ca853deeSEric Joyner }
650ca853deeSEric Joyner 
/**
 * iavf_isc_rxd_pkt_get - Decapsulate packet from Rx descriptors
 * @arg: void pointer to the VSI
 * @ri: packet info structure
 *
 * Read packet data from the Rx ring descriptors and fill in the packet info
 * structure so that the iflib stack can process the packet.  A packet may
 * span up to IAVF_MAX_RX_SEGS descriptors; each descriptor becomes one
 * fragment in @ri.
 *
 * @remark this routine executes in ithread context.
 *
 * @returns zero success, or EBADMSG if the packet is corrupted.
 */
static int
iavf_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct iavf_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct iavf_rx_queue	*que = &vsi->rx_queues[ri->iri_qsidx];
	struct rx_ring		*rxr = &que->rxr;
	union iavf_rx_desc	*cur;
	u32		status, error;
	u16		plen;
	u64		qword;
	u8		ptype;
	bool		eop;
	int i, cidx;

	cidx = ri->iri_cidx;
	i = 0;
	/*
	 * Walk the chain of descriptors for this packet, one fragment per
	 * descriptor, until the EOF (end of frame/packet) bit is seen.
	 */
	do {
		/* 5 descriptor receive limit */
		MPASS(i < IAVF_MAX_RX_SEGS);

		cur = &rxr->rx_base[cidx];
		/* Status, error, length and ptype all live in qword1. */
		qword = le64toh(cur->wb.qword1.status_error_len);
		status = (qword & IAVF_RXD_QW1_STATUS_MASK)
		    >> IAVF_RXD_QW1_STATUS_SHIFT;
		error = (qword & IAVF_RXD_QW1_ERROR_MASK)
		    >> IAVF_RXD_QW1_ERROR_SHIFT;
		plen = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK)
		    >> IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
		ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK)
		    >> IAVF_RXD_QW1_PTYPE_SHIFT;

		/* we should never be called without a valid descriptor */
		MPASS((status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) != 0);

		ri->iri_len += plen;
		rxr->rx_bytes += plen;

		/* Zero qword1 so a stale DD bit can't be seen on this slot
		 * again; qword0 (RSS hash/VLAN tag) is still read below. */
		cur->wb.qword1.status_error_len = 0;
		eop = (status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT));

		/*
		** Make sure bad packets are discarded,
		** note that only EOP descriptor has valid
		** error results.
		*/
		if (eop && (error & (1 << IAVF_RX_DESC_ERROR_RXE_SHIFT))) {
			rxr->desc_errs++;
			return (EBADMSG);
		}
		/* Record this fragment for iflib.  flid 0: this driver uses
		 * a single free list (note flid is unused in rxd_flush). */
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == vsi->shared->isc_nrxd[0])
			cidx = 0;
		i++;
	} while (!eop);

	/* capture data for dynamic ITR adjustment */
	rxr->packets++;
	rxr->rx_packets++;

	/*
	 * cur/status/error/ptype now hold the EOP descriptor's values,
	 * the only ones valid for checksum, RSS, and VLAN reporting.
	 */
	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		iavf_rx_checksum(ri, status, error, ptype);
	ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
	ri->iri_rsstype = iavf_ptype_to_hash(ptype);
	if (status & (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		ri->iri_vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		ri->iri_flags |= M_VLANTAG;
	}
	ri->iri_nfrags = i;
	return (0);
}
736ca853deeSEric Joyner 
737ca853deeSEric Joyner /**
738ca853deeSEric Joyner  * iavf_rx_checksum - Handle Rx hardware checksum indication
739ca853deeSEric Joyner  * @ri: Rx packet info structure
740ca853deeSEric Joyner  * @status: status from Rx descriptor
741ca853deeSEric Joyner  * @error: error from Rx descriptor
742ca853deeSEric Joyner  * @ptype: packet type
743ca853deeSEric Joyner  *
744ca853deeSEric Joyner  * Verify that the hardware indicated that the checksum is valid.
745ca853deeSEric Joyner  * Inform the stack about the status of checksum so that stack
746ca853deeSEric Joyner  * doesn't spend time verifying the checksum.
747ca853deeSEric Joyner  */
748ca853deeSEric Joyner static void
iavf_rx_checksum(if_rxd_info_t ri,u32 status,u32 error,u8 ptype)749ca853deeSEric Joyner iavf_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
750ca853deeSEric Joyner {
751ca853deeSEric Joyner 	struct iavf_rx_ptype_decoded decoded;
752ca853deeSEric Joyner 
753ca853deeSEric Joyner 	ri->iri_csum_flags = 0;
754ca853deeSEric Joyner 
755ca853deeSEric Joyner 	/* No L3 or L4 checksum was calculated */
756ca853deeSEric Joyner 	if (!(status & (1 << IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
757ca853deeSEric Joyner 		return;
758ca853deeSEric Joyner 
759ca853deeSEric Joyner 	decoded = decode_rx_desc_ptype(ptype);
760ca853deeSEric Joyner 
761ca853deeSEric Joyner 	/* IPv6 with extension headers likely have bad csum */
762ca853deeSEric Joyner 	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
763ca853deeSEric Joyner 	    decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6) {
764ca853deeSEric Joyner 		if (status &
765ca853deeSEric Joyner 		    (1 << IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
766ca853deeSEric Joyner 			ri->iri_csum_flags = 0;
767ca853deeSEric Joyner 			return;
768ca853deeSEric Joyner 		}
769ca853deeSEric Joyner 	}
770ca853deeSEric Joyner 
771ca853deeSEric Joyner 	ri->iri_csum_flags |= CSUM_L3_CALC;
772ca853deeSEric Joyner 
773ca853deeSEric Joyner 	/* IPv4 checksum error */
774ca853deeSEric Joyner 	if (error & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT))
775ca853deeSEric Joyner 		return;
776ca853deeSEric Joyner 
777ca853deeSEric Joyner 	ri->iri_csum_flags |= CSUM_L3_VALID;
778ca853deeSEric Joyner 	ri->iri_csum_flags |= CSUM_L4_CALC;
779ca853deeSEric Joyner 
780ca853deeSEric Joyner 	/* L4 checksum error */
781ca853deeSEric Joyner 	if (error & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT))
782ca853deeSEric Joyner 		return;
783ca853deeSEric Joyner 
784ca853deeSEric Joyner 	ri->iri_csum_flags |= CSUM_L4_VALID;
785ca853deeSEric Joyner 	ri->iri_csum_data |= htons(0xffff);
786ca853deeSEric Joyner }
787