/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

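/* Return the RX completion descriptor at the (masked) CQ head, or NULL if
 * the device has not produced one yet. Ownership is tracked by a phase bit:
 * a descriptor belongs to the driver only when its phase matches the phase
 * the CQ currently expects, which flips on every queue wrap-around.
 */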
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

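/* Copy one completed bounce buffer line into the device's LLQ memory at the
 * current tail, honoring the TX burst budget when the device advertises one,
 * then advance the SQ tail (flipping the phase bit on wrap-around).
 */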
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

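/* For LLQ placement, copy the packet header into the current bounce buffer
 * right after the descriptors that precede it; a no-op under host placement,
 * where the header stays in host memory.
 */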
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

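/* Hand out the next descriptor slot inside the current bounce buffer line
 * and account for it in the LLQ packet control state.
 */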
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

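/* Flush the current bounce buffer to the device if anything was written into
 * it, switch to a fresh zeroed buffer and reset the per-line bookkeeping.
 * A no-op under host placement policy.
 */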
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

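/* LLQ flavor of the tail update: once a descriptor line is fully consumed,
 * write it to the device, switch to a fresh bounce buffer and recompute how
 * many descriptors fit in the new line.
 */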
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

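/* Walk the CQ from the current head, collecting the completion descriptors
 * that make up one RX packet. On a fully completed packet, *num_descs holds
 * the descriptor count and *first_cdesc_idx the start index of the packet.
 * If the packet is not complete yet, *num_descs is set to 0 and the partial
 * count is kept for the next call. A FIRST bit seen mid-packet means device
 * and driver lost sync and is reported as ENA_COM_FAULT.
 */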
static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx,
				    u16 *num_descs)
{
	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u32 last = 0;

	do {
		u32 status;

		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;
		status = READ_ONCE32(cdesc->status);

		ena_com_cq_inc_head(io_cq);
		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
			     ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
			struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);

			ena_trc_err(dev,
				    "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
				    count, io_cq->qid, cdesc->req_id);
			return ENA_COM_FAULT;
		}
		count++;
		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		*num_descs = count;
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count = count;
		*num_descs = 0;
	}

	return ENA_COM_OK;
}

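/* Build a TX metadata descriptor (MSS plus L3/L4 header lengths and offsets)
 * in the next SQ slot and advance the tail.
 */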
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return ENA_COM_OK;
}

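/* Translate the status bits of a packet's last completion descriptor into
 * the RX context: protocol indices, checksum results, RSS hash and IPv4
 * fragment indication.
 */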
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

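/* Prepare one TX packet for transmission: push the header when needed
 * (mandatory under LLQ placement), emit a metadata descriptor if required,
 * then one buffer descriptor per element of ena_tx_ctx->ena_bufs. On return
 * *nb_hw_desc holds the number of hardware descriptors consumed.
 */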
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided in LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

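/* Retrieve one received packet from the CQ: fill ena_rx_ctx with the length
 * and req_id of each completed descriptor plus the flags of the last one.
 * ena_rx_ctx->descs is set to 0 when no complete packet is available yet.
 * The matching SQ's next_to_comp is advanced so the consumed RX buffers can
 * be reposted.
 */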
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;
	int rc;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
	if (unlikely(rc != ENA_COM_OK))
		return ENA_COM_FAULT;

	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

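/* Post a single RX buffer to the SQ as a standalone (FIRST | LAST)
 * descriptor tagged with req_id, then advance the tail.
 */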
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		    __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

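/* Return true when the CQ holds no completion descriptor ready for the
 * driver at its current head.
 */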
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}