/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

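/* A completion descriptor is considered new only when its phase bit matches
 * the phase the driver currently expects; the device flips the bit on every
 * ring wrap-around, so a mismatch means the entry at the head hasn't been
 * written by the device yet.
 */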
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	return cdesc;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

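/* In LLQ (low latency queue) placement the descriptors are staged in a host
 * bounce buffer and the completed descriptor list entry is copied into the
 * device memory in one burst; the wmb() below keeps the device from
 * observing a partially written line.
 */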
static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

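/* In LLQ mode the packet header is pushed into the same bounce buffer as
 * the descriptors, placed right after the first descs_num_before_header
 * descriptors; host placement queues have nothing to do here.
 */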
static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("Trying to write a header larger than the LLQ entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

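/* Hand out the next descriptor slot from the current bounce buffer line and
 * account for it in the packet control state.
 */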
static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

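/* Flush a partially used bounce buffer line to the device (if any
 * descriptors were written into it) and reset the packet control state for
 * the next packet.
 */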
static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		ena_com_write_bounce_buffer_to_dev(io_sq,
						   pkt_ctrl->curr_bounce_buf);
		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

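/* Advance the LLQ tail: once the current bounce buffer line has no
 * descriptor slots left, write it to the device and start a fresh line.
 */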
static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	if (!pkt_ctrl->descs_left_in_line) {
		ena_com_write_bounce_buffer_to_dev(io_sq,
						   pkt_ctrl->curr_bounce_buf);

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}
}

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_com_sq_update_llq_tail(io_sq);
		return;
	}

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

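/* An RX packet may span several completion descriptors, so walk the CQ from
 * the current head until a cdesc with the LAST bit set is found. When the
 * packet isn't complete yet, remember how many cdescs were consumed so far
 * and report a count of zero to the caller.
 */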
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

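/* Build an extended TX meta descriptor carrying the offload parameters
 * (MSS, header lengths/offsets) and cache it, so packets that reuse the
 * same meta data don't need a new meta descriptor.
 */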
static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
	ena_rx_ctx->l4_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

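/* Turn a TX context into HW descriptors on the SQ: push the header (LLQ),
 * emit a meta descriptor when the offload meta data changed, then emit one
 * descriptor per buffer; the first descriptor also carries the header
 * length. On success *nb_hw_desc holds the number of descriptors consumed.
 *
 * A minimal caller sketch (illustrative only, error handling elided):
 *
 *	int nb_hw_desc;
 *
 *	if (ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc))
 *		return;
 *	ena_com_write_sq_doorbell(io_sq);
 */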
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs + 1 for a potential meta descriptor */
	if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
		ena_trc_err("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("Header size is too large: %d, max header size: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
		return ENA_COM_INVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set the first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_sq_update_tail(io_sq);

	ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return 0;
}

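/* Retrieve one received packet from the CQ: collect the completion
 * descriptors that belong to it, report each buffer's length and req_id to
 * the caller, advance the matching SQ's next_to_comp and extract the RX
 * flags (protocols, checksum errors, hash, frag) from the last cdesc.
 */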
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

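/* Post a single RX buffer to the RX SQ. Every buffer is posted on its own,
 * so each descriptor is marked both FIRST and LAST, and the phase bit marks
 * it as valid for the device.
 */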
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}

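/* Fetch the req_id of the next completed TX descriptor, or return
 * ENA_COM_TRY_AGAIN when the phase bit shows that the device hasn't written
 * a completion at the current CQ head yet.
 */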
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	*req_id = READ_ONCE(cdesc->req_id);

	return 0;
}