/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

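/* Return a pointer to the Rx completion descriptor at the current CQ head,
 * or NULL if the device has not yet written a descriptor with the expected
 * phase bit. The phase check is followed by a dma_rmb() so the rest of the
 * descriptor is never read before the phase bit.
 */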
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

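/* Return the next free submission descriptor of a host-memory (non-LLQ)
 * queue, located at the masked tail index.
 */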
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

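/* Copy a completed bounce buffer (one LLQ descriptor list entry) to device
 * memory at the SQ tail and advance the tail. Enforces the Tx burst budget
 * when the device advertises one and flips the phase bit on wrap-around.
 */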
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err("Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

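/* Copy the packet's push header into the current bounce buffer, right after
 * the descriptors that precede the header. A no-op for host-memory queues;
 * fails if the header does not fit in the LLQ entry or if no bounce buffer
 * is available.
 */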
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

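/* Return the next descriptor slot inside the current LLQ bounce buffer and
 * account for it in the packet control state.
 */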
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

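/* Flush a partially filled bounce buffer to the device (LLQ only) and
 * switch to a fresh, zeroed one, so the next packet starts on a new
 * descriptor list entry.
 */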
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err("failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

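/* Advance the LLQ tail: once the current descriptor list entry is full,
 * write it to the device, start a new zeroed bounce buffer and reset the
 * per-line descriptor budget.
 */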
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err("failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

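/* Advance the SQ tail after a descriptor has been filled in: delegate to
 * the LLQ path for device-memory queues, otherwise bump the tail and flip
 * the phase bit on wrap-around.
 */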
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

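/* Walk the CQ from the current head and count the completion descriptors
 * belonging to the next received packet. Returns the descriptor count and
 * stores the packet's first cdesc index in *first_cdesc_idx, or returns 0
 * (accumulating the partial count) when the packet's last descriptor has
 * not arrived yet.
 */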
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

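/* Fill a Tx meta descriptor (MSS, L3/L4 header lengths and offsets) at the
 * SQ tail and advance the tail.
 */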
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	/* get_sq_desc() returns NULL on the LLQ path when no bounce buffer
	 * is available
	 */
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

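/* Decide whether a meta descriptor is needed for this packet. With meta
 * caching disabled, a valid meta is required and always written; otherwise
 * a meta descriptor is only written (and cached) when it differs from the
 * cached one. *have_meta tells the caller whether a descriptor was used.
 */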
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return ENA_COM_INVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	} else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	} else {
		*have_meta = false;
		return ENA_COM_OK;
	}
}

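/* Extract the Rx offload flags (L3/L4 protocol indices, checksum results,
 * hash and fragment bit) from a completion descriptor's status word into
 * the Rx context.
 */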
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

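/* Prepare a Tx packet for transmission: write the push header (LLQ), an
 * optional meta descriptor and one buffer descriptor per element of
 * ena_bufs, then report the number of HW descriptors consumed via
 * *nb_hw_desc. Ringing the doorbell is left to the caller.
 */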
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err("push header wasn't provided on LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err("failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err("failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err("failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err("failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);
	if (rc)
		ena_trc_err("failed when closing bounce buffer\n");

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

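/* Retrieve the next received packet from the CQ: collect its completion
 * descriptors, copy length/req_id per buffer into ena_rx_ctx, advance the
 * paired SQ's next_to_comp and extract the offload flags from the last
 * cdesc. ena_rx_ctx->descs is 0 when no complete packet is available.
 */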
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	} while ((++i < nb_hw_desc) &&
		 (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

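/* Post a single receive buffer to the Rx SQ: build a FIRST|LAST descriptor
 * carrying the buffer's DMA address, length and req_id, then advance the
 * tail. Ringing the doorbell is left to the caller.
 */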
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		ENA_ETH_IO_RX_DESC_LAST_MASK |
		(io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
		ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

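/* Return true when no completion descriptor with the expected phase is
 * pending at the CQ head.
 */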
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);

	return cdesc == NULL;
}