/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

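/*
 * Poll the completion descriptor at the current CQ head. The device
 * toggles the expected phase value on each wrap of the completion ring,
 * so an entry whose phase bit matches io_cq->phase was newly written by
 * the device; a mismatch means no new completion is available and NULL
 * is returned. The dma_rmb() ensures the rest of the descriptor is not
 * read before the phase bit.
 */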
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

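/*
 * Copy one completed descriptor line from the host-side bounce buffer
 * into the device memory window (LLQ) using the platform
 * ENA_MEMCPY_TO_DEVICE_64() helper, then advance the SQ tail and flip
 * the phase bit on wrap around. When a max TX burst is enforced, each
 * line written consumes one entry of the remaining burst budget.
 */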
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

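/*
 * For device placement (LLQ) the packet header is pushed inline: it is
 * copied into the current bounce buffer right after the slots reserved
 * for the descriptors (descs_num_before_header of them). For host
 * placement this is a no-op, since the header stays in host memory.
 */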
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write a header larger than the LLQ entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

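/*
 * Flush a partially filled bounce buffer to the device and arm a fresh,
 * zeroed one, so the next packet starts on a new LLQ line. This is a
 * no-op for host placement queues.
 */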
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

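/*
 * LLQ tail update: once all descriptor slots of the current line have
 * been consumed, write the line out and reset the per-line bookkeeping.
 * The number of descriptors per line depends on the stride control:
 * a single descriptor per entry, or as many as fit in
 * desc_list_entry_size.
 */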
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

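/*
 * Walk the CQ from the current head and count the completion
 * descriptors belonging to the next RX packet, stopping at the
 * descriptor with the LAST bit set. If the packet's descriptors have
 * not all arrived yet, the partial count is accumulated in
 * cur_rx_pkt_cdesc_count and 0 is returned so the caller retries later.
 */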
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %d, packet completed. first desc idx %u, descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

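/*
 * Build and submit an extended TX meta descriptor carrying the
 * TSO/checksum offload parameters. Note that the MSS is split across
 * two fields: bits 0-9 go into word2 and bits 10-13 into len_ctrl.
 */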
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

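/*
 * Decide whether this packet needs a meta descriptor. With meta caching
 * enabled, a descriptor is emitted only when the offload parameters
 * differ from the cached copy; with caching disabled, every packet must
 * carry valid meta and gets a fresh descriptor.
 */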
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When meta caching is disabled, don't bother to save the meta and
	 * compare it to the stored version; just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return ENA_COM_INVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return ENA_COM_OK;
}

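/*
 * Translate the raw completion descriptor status bits into the
 * driver-visible RX context fields: L3/L4 protocol indices, checksum
 * verdicts, hash and the IPv4 fragment flag.
 */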
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

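/*
 * Prepare a TX packet for transmission. A minimal sketch of the
 * expected caller flow (bufs, num_segs and req_id are hypothetical
 * names; real drivers add locking, DMA mapping and completion
 * handling):
 *
 *	struct ena_com_tx_ctx ctx = { 0 };
 *	int nb_hw_desc, rc;
 *
 *	ctx.ena_bufs = bufs;
 *	ctx.num_bufs = num_segs;
 *	ctx.req_id = req_id;
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	if (rc == ENA_COM_OK)
 *		ena_com_write_sq_doorbell(io_sq);
 */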
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided in LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets, just close the bounce buffer */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set the first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its desc with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);
	if (rc)
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed when closing bounce buffer\n");

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

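/*
 * Fetch one received packet from the CQ, if available. A minimal sketch
 * of the expected poll loop (max_frags is a hypothetical name; real
 * drivers refill the RX SQ and pass the buffers up the stack):
 *
 *	struct ena_com_rx_ctx ctx = { 0 };
 *
 *	ctx.max_bufs = max_frags;
 *	rc = ena_com_rx_pkt(io_cq, io_sq, &ctx);
 *	if (rc != 0 || ctx.descs == 0)
 *		return;
 */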
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last cdesc of the packet */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

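/*
 * Post a single RX buffer to the RX SQ. A refill loop would look
 * roughly like this (bufs, ids and num are hypothetical names; the
 * doorbell kicks the device once the batch is posted):
 *
 *	for (i = 0; i < num; i++) {
 *		rc = ena_com_add_single_rx_desc(io_sq, &bufs[i], ids[i]);
 *		if (unlikely(rc != 0))
 *			break;
 *	}
 *	ena_com_write_sq_doorbell(io_sq);
 */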
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		    __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	return ena_com_get_next_rx_cdesc(io_cq) == NULL;
}