/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES		1
#define ENA_NETMAP_NO_MORE_FRAMES	0
#define ENA_MAX_FRAMES			16384

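/*
 * Per-sync context shared by the Tx and Rx paths. nm_i indexes the netmap
 * ring (bounded by lim == nkr_num_slots - 1), while nt tracks the position
 * in the NIC ring (next_to_use when sending, next_to_clean when reclaiming).
 */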
struct ena_netmap_ctx {
	struct netmap_kring *kring;
	struct ena_adapter *adapter;
	struct netmap_adapter *na;
	struct netmap_slot *slots;
	struct ena_ring *ring;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	u_int nm_i;
	uint16_t nt;
	uint16_t lim;
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *, struct netmap_slot *,
    u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *, struct ena_netmap_ctx *,
    uint16_t);

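/*
 * Register this adapter with netmap: describe the ring geometry and hook up
 * the txsync/rxsync/register callbacks. NAF_MOREFRAG advertises support for
 * packets spanning multiple netmap slots.
 */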
int
ena_netmap_attach(struct ena_adapter *adapter)
{
	struct netmap_adapter na;

	ena_log_nm(adapter->pdev, INFO, "netmap attach\n");

	bzero(&na, sizeof(na));
	na.na_flags = NAF_MOREFRAG;
	na.ifp = adapter->ifp;
	na.num_tx_desc = adapter->requested_tx_ring_size;
	na.num_rx_desc = adapter->requested_rx_ring_size;
	na.num_tx_rings = adapter->num_io_queues;
	na.num_rx_rings = adapter->num_io_queues;
	na.rx_buf_maxsize = adapter->buf_ring_size;
	na.nm_txsync = ena_netmap_txsync;
	na.nm_rxsync = ena_netmap_rxsync;
	na.nm_register = ena_netmap_reg;

	return (netmap_attach(&na));
}

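/*
 * Take the next free buffer from the netmap Rx ring, DMA-map it and attach
 * it to rx_info so the device can fill it. The slot's buf_idx is stashed in
 * rx_info->netmap_buf_idx and cleared in the slot to mark that ownership
 * has moved to the driver.
 */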
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	void *addr;
	uint64_t paddr;
	int nm_i, qid, head, lim, rc;

	/* Do nothing if the previously allocated buffer is still unused. */
	if (unlikely(rx_info->netmap_buf_idx != 0))
		return (0);

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	nm_i = kring->nr_hwcur;
	head = kring->rhead;

	ena_log_nm(adapter->pdev, DBG,
	    "nr_hwcur: %d, nr_hwtail: %d, rhead: %d, rcur: %d, rtail: %d\n",
	    kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur,
	    kring->rtail);

	if ((nm_i == head) && rx_ring->initialized) {
		ena_log_nm(adapter->pdev, ERR,
		    "No free slots in netmap ring\n");
		return (ENOMEM);
	}

	ring = kring->ring;
	if (ring == NULL) {
		ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
		return (EFAULT);
	}
	slot = &ring->slot[nm_i];

	addr = PNMB(na, slot, &paddr);
	if (addr == NETMAP_BUF_BASE(na)) {
		ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
		return (EFAULT);
	}

	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
	if (rc != 0) {
		ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
		return (rc);
	}
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	rx_info->ena_buf.paddr = paddr;
	rx_info->ena_buf.len = ring->nr_buf_size;
	rx_info->mbuf = NULL;
	rx_info->netmap_buf_idx = slot->buf_idx;

	slot->buf_idx = 0;

	lim = kring->nkr_num_slots - 1;
	kring->nr_hwcur = nm_next(nm_i, lim);

	return (0);
}

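/*
 * Return a buffer previously taken by ena_netmap_alloc_rx_slot() to the
 * netmap Rx ring: unmap it, restore its buf_idx into the slot and rewind
 * nr_hwcur by one position.
 */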
void
ena_netmap_free_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int nm_i, qid, lim;

	na = NA(adapter->ifp);
	if (na == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
		return;
	}

	if (na->rx_rings == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
		return;
	}

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	if (kring == NULL) {
		ena_log_nm(adapter->pdev, ERR,
		    "netmap kernel ring %d is NULL\n", qid);
		return;
	}

	lim = kring->nkr_num_slots - 1;
	nm_i = nm_prev(kring->nr_hwcur, lim);

	if (kring->nr_mode != NKR_NETMAP_ON)
		return;

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

	KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

	slot = &kring->ring->slot[nm_i];

	ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
	slot->buf_idx = rx_info->netmap_buf_idx;
	slot->flags = NS_BUF_CHANGED;

	rx_info->netmap_buf_idx = 0;
	kring->nr_hwcur = nm_i;
}

static bool
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;

	if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
		na = NA(adapter->ifp);
		kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
		if (kring->nr_mode == NKR_NETMAP_ON)
			return (true);
	}
	return (false);
}

bool
ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return (ena_ring_in_netmap(adapter, qid, NR_TX));
}

bool
ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return (ena_ring_in_netmap(adapter, qid, NR_RX));
}

static void
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
	if (!ena_ring_in_netmap(adapter, qid, x))
		return;

	netmap_reset(NA(adapter->ifp), x, qid, 0);
	ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
	    (x == NR_TX) ? "Tx" : "Rx", qid);
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_RX);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_TX);
}

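/*
 * netmap register/unregister callback. The interface is brought down, the
 * netmap mode flag of every pending kring is toggled, and the interface is
 * brought back up. If ena_up() fails, a full device reset is attempted as
 * a recovery path.
 */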
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
	if_t ifp = na->ifp;
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	struct netmap_kring *kring;
	enum txrx t;
	int rc, i;

	ENA_LOCK_LOCK();
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ena_down(adapter);

	if (onoff) {
		ena_log_nm(pdev, INFO, "netmap on\n");
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_on(kring)) {
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		nm_set_native_flags(na);
	} else {
		ena_log_nm(pdev, INFO, "netmap off\n");
		nm_clear_native_flags(na);
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
				}
			}
		}
	}

	rc = ena_up(adapter);
	if (rc != 0) {
		ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
		nm_clear_native_flags(na);
		ena_destroy_device(adapter, false);
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
		rc = ena_restore_device(adapter);
	}
	ENA_LOCK_UNLOCK();

	return (rc);
}

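/*
 * netmap txsync callback: under the ring lock, transmit everything the
 * application queued between nr_hwcur and rhead, then reclaim completed
 * Tx descriptors.
 */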
static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc = 0;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

	ENA_RING_MTX_LOCK(ctx.ring);
	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
		goto txsync_end;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		goto txsync_end;

	rc = ena_netmap_tx_frames(&ctx);
	ena_netmap_tx_cleanup(&ctx);

txsync_end:
	ENA_RING_MTX_UNLOCK(ctx.ring);
	return (rc);
}

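/*
 * Send frames from the netmap ring until rhead is reached or the device
 * runs out of Tx descriptors, then ring the doorbell once for the whole
 * batch that was queued.
 */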
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
	struct ena_ring *tx_ring = ctx->ring;
	int rc = 0;

	ctx->nm_i = ctx->kring->nr_hwcur;
	ctx->nt = ctx->ring->next_to_use;

	__builtin_prefetch(&ctx->slots[ctx->nm_i]);

	while (ctx->nm_i != ctx->kring->rhead) {
		if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
			/*
			 * ena_netmap_tx_frame() also returns an error when
			 * the Tx ring is simply full, which is a common
			 * condition rather than a failure. Do not pass
			 * ENA_COM_NO_MEM up to netmap: the application can
			 * see the ring state from the netmap ring pointers,
			 * and an error here could make it exit.
			 */
			if (rc == ENA_COM_NO_MEM)
				rc = 0;
			break;
		}
		tx_ring->acum_pkts++;
	}

	/* If any packet was sent... */
	if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
		/* ...send the doorbell to the device. */
		ena_ring_tx_doorbell(tx_ring);

		ctx->ring->next_to_use = ctx->nt;
		ctx->kring->nr_hwcur = ctx->nm_i;
	}

	return (rc);
}

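/*
 * Build and post a single Tx packet from one or more netmap slots.
 * Returns ENA_COM_NO_MEM when the device ring is full, so the caller can
 * end the batch without treating it as a fatal error.
 */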
static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_tx_buffer *tx_info;
	uint16_t req_id;
	uint16_t header_len;
	uint16_t packet_len;
	int nb_hw_desc;
	int rc;
	void *push_hdr;

	adapter = ctx->adapter;
	if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
		ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
		return (EINVAL);
	}

	tx_ring = ctx->ring;

	req_id = tx_ring->free_tx_ids[ctx->nt];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	tx_info->nm_info.sockets_used = 0;

	rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
	    &packet_len);
	if (unlikely(rc != 0)) {
		ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
		return (rc);
	}

	bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;
	ena_tx_ctx.meta_valid = adapter->disable_meta_caching;

	/* There are no offloads, as netmap does not support them. */

	if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
	    ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx))
		ena_ring_tx_doorbell(tx_ring);

	rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc != 0)) {
		if (likely(rc == ENA_COM_NO_MEM)) {
			ena_log_nm(adapter->pdev, DBG,
			    "Tx ring[%d] is out of space\n", tx_ring->que->id);
		} else {
			ena_log_nm(adapter->pdev, ERR,
			    "Failed to prepare Tx bufs\n");
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_DRIVER_INVALID_STATE);
		}
		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

		ena_netmap_unmap_last_socket_chain(ctx, tx_info);
		return (rc);
	}

	counter_enter();
	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
	counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
	counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
	counter_exit();

	tx_info->tx_descs = nb_hw_desc;

	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

	return (0);
}

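/* Count the slots that make up the packet starting at ctx->nm_i. */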
static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
	uint16_t slots = 1;
	uint16_t nm = ctx->nm_i;

	while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
		slots++;
		nm = nm_next(nm, ctx->lim);
	}

	return (slots);
}

static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
	struct netmap_slot *nm_slot;
	uint16_t packet_size = 0;

	do {
		nm_slot = &slots[slot_index];
		packet_size += nm_slot->len;
		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0);

	return (packet_size);
}

static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
	struct netmap_slot *nm_slot;
	void *slot_vaddr;
	uint16_t data_amount;

	do {
		nm_slot = &slots[slot_index];
		slot_vaddr = NMB(na, nm_slot);
		if (unlikely(slot_vaddr == NULL))
			return (EINVAL);

		data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
		memcpy(destination, slot_vaddr, data_amount);
		bytes_to_copy -= data_amount;
		/* Advance past the bytes just copied. */
		destination = (char *)destination + data_amount;

		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

	return (0);
}

static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
	device_t pdev;
	int rc;

	pdev = ((struct ena_adapter *)if_getsoftc(na->ifp))->pdev;

	*vaddr = PNMB(na, slot, paddr);
	if (unlikely(*vaddr == NULL)) {
		ena_log_nm(pdev, ERR, "Slot address is NULL\n");
		return (EINVAL);
	}

	rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
	if (unlikely(rc != 0)) {
		ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
		    slot->buf_idx);
		return (EINVAL);
	}

	return (0);
}

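/*
 * Prepare a packet's slots for transmission: resolve the push header for
 * LLQ mode (linearizing it into an intermediate buffer when it spans
 * slots) and DMA-map every data fragment into tx_info->bufs. Ownership of
 * the slots' buffers is taken by zeroing buf_idx; the original indices are
 * saved so they can be returned on completion.
 */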
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
	struct netmap_slot *slot;
	struct ena_com_buf *ena_buf;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_netmap_tx_info *nm_info;
	bus_dmamap_t *nm_maps;
	void *vaddr;
	uint64_t paddr;
	uint32_t *nm_buf_idx;
	uint32_t slot_head_len;
	uint32_t frag_len;
	uint32_t remaining_len;
	uint16_t push_len;
	uint16_t delta;
	int rc;

	adapter = ctx->adapter;
	tx_ring = ctx->ring;
	ena_buf = tx_info->bufs;
	nm_info = &tx_info->nm_info;
	nm_maps = nm_info->map_seg;
	nm_buf_idx = nm_info->socket_buf_idx;
	slot = &ctx->slots[ctx->nm_i];

	slot_head_len = slot->len;
	*packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
	remaining_len = *packet_len;
	delta = 0;

	__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/*
		 * When the device is in LLQ mode, the driver copies the
		 * header into the device memory space. The ena_com layer
		 * assumes that the header is in a linear memory space, but
		 * part of the header may live in the fragmented buffers.
		 * First, check whether the header fits in the first slot.
		 * If not, copy it to a separate buffer that will hold the
		 * linearized data.
		 */
		push_len = min_t(uint32_t, *packet_len,
		    tx_ring->tx_max_header_size);
		*header_len = push_len;
		/* If header is in linear space, just point to socket's data. */
		if (likely(push_len <= slot_head_len)) {
			*push_hdr = NMB(ctx->na, slot);
			if (unlikely(*push_hdr == NULL)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Slot vaddress is NULL\n");
				return (EINVAL);
			}
			/*
			 * Otherwise, copy the whole header from multiple
			 * slots into the intermediate buffer.
			 */
		} else {
			rc = ena_netmap_copy_data(ctx->na, ctx->slots,
			    ctx->nm_i, ctx->lim, push_len,
			    tx_ring->push_buf_intermediate_buf);
			if (unlikely(rc)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Failed to copy data from slots to push_buf\n");
				return (EINVAL);
			}

			*push_hdr = tx_ring->push_buf_intermediate_buf;
			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

			delta = push_len - slot_head_len;
		}

		ena_log_nm(adapter->pdev, DBG,
		    "slot: %d header_buf->vaddr: %p push_len: %d\n",
		    slot->buf_idx, *push_hdr, push_len);

		/*
		 * If the header was in linear memory space, map the rest of
		 * the data in the first slot for DMA.
		 */
		if (slot_head_len > push_len) {
			rc = ena_netmap_map_single_slot(ctx->na, slot,
			    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
			if (unlikely(rc != 0)) {
				ena_log_nm(adapter->pdev, ERR,
				    "DMA mapping error\n");
				return (rc);
			}
			nm_maps++;

			ena_buf->paddr = paddr + push_len;
			ena_buf->len = slot->len - push_len;
			ena_buf++;

			tx_info->num_of_bufs++;
		}

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;

		/*
		 * If the header is in non-linear space (delta > 0), skip the
		 * slots that contain only header data and map the last one,
		 * which holds both header and packet data.
		 * The first segment is already counted in.
		 */
		while (delta > 0) {
			__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
			frag_len = slot->len;

			/*
			 * If the whole segment contains header data only,
			 * just move to the next one and reduce delta.
			 */
			if (unlikely(delta >= frag_len)) {
				delta -= frag_len;
			} else {
				/*
				 * Map the data and then assign it with the
				 * offsets
				 */
				rc = ena_netmap_map_single_slot(ctx->na, slot,
				    adapter->tx_buf_tag, *nm_maps, &vaddr,
				    &paddr);
				if (unlikely(rc != 0)) {
					ena_log_nm(adapter->pdev, ERR,
					    "DMA mapping error\n");
					goto error_map;
				}
				nm_maps++;

				ena_buf->paddr = paddr + delta;
				ena_buf->len = slot->len - delta;
				ena_buf++;

				tx_info->num_of_bufs++;
				delta = 0;
			}

			remaining_len -= slot->len;

			/* Save buf idx before advancing */
			*nm_buf_idx = slot->buf_idx;
			nm_buf_idx++;
			slot->buf_idx = 0;

			/* Advance to the next socket */
			ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
			slot = &ctx->slots[ctx->nm_i];
			nm_info->sockets_used++;
		}
	} else {
		*push_hdr = NULL;
		/*
		 * header_len is just a hint for the device. Netmap does not
		 * provide any information about the packet header length,
		 * and it is not guaranteed that all packet headers fit in
		 * the first slot, so setting header_len to 0 makes the
		 * device ignore this value and resolve the header on its
		 * own.
		 */
		*header_len = 0;
	}

	/* Map all remaining data (regular routine for non-LLQ mode) */
	while (remaining_len > 0) {
		__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);

		rc = ena_netmap_map_single_slot(ctx->na, slot,
		    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
		if (unlikely(rc != 0)) {
			ena_log_nm(adapter->pdev, ERR, "DMA mapping error\n");
			goto error_map;
		}
		nm_maps++;

		ena_buf->paddr = paddr;
		ena_buf->len = slot->len;
		ena_buf++;

		tx_info->num_of_bufs++;

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;
	}

	return (0);

error_map:
	ena_netmap_unmap_last_socket_chain(ctx, tx_info);

	return (rc);
}

static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
	struct ena_netmap_tx_info *nm_info;
	int n;

	nm_info = &tx_info->nm_info;

	/**
	 * The number of sockets used can differ from the number of DMA
	 * buffers in LLQ mode, so they must be handled separately.
	 * First, unmap the DMA maps.
	 */
	n = tx_info->num_of_bufs;
	while (n--) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to the userspace */
	n = nm_info->sockets_used;
	while (n--) {
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
		ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
	}
	nm_info->sockets_used = 0;
}

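/*
 * Reclaim descriptors for all transmissions the device has completed and
 * hand the freed slots back to the netmap ring by advancing nr_hwtail.
 */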
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
	uint16_t req_id;
	uint16_t total_tx_descs = 0;

	ctx->nm_i = ctx->kring->nr_hwtail;
	ctx->nt = ctx->ring->next_to_clean;

	/* Reclaim buffers for completed transmissions */
	while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
		if (validate_tx_req_id(ctx->ring, req_id) != 0)
			break;
		total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
	}

	ctx->kring->nr_hwtail = ctx->nm_i;

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ctx->ring->next_to_clean = ctx->nt;
		ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
	}
}

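/*
 * Clean up a single completed Tx request: unmap its DMA segments, return
 * the buffer indices to the netmap slots and recycle the request id.
 * Returns the number of hardware descriptors the packet used.
 */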
static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
	struct ena_tx_buffer *tx_info;
	struct ena_netmap_tx_info *nm_info;
	int n;

	tx_info = &ctx->ring->tx_buffer_info[req_id];
	nm_info = &tx_info->nm_info;

	/**
	 * The number of sockets used can differ from the number of DMA
	 * buffers in LLQ mode, so they must be handled separately.
	 * First, unmap the DMA maps.
	 */
	for (n = 0; n < tx_info->num_of_bufs; n++) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to the userspace */
	for (n = 0; n < nm_info->sockets_used; n++) {
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
		    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
	}
	nm_info->sockets_used = 0;

	ctx->ring->free_tx_ids[ctx->nt] = req_id;
	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);

	return (tx_info->tx_descs);
}

static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
	struct ena_adapter *adapter = tx_ring->adapter;

	if (likely(req_id < tx_ring->ring_size))
		return (0);

	ena_log_nm(adapter->pdev, WARN, "Invalid req_id %hu in qid %hu\n",
	    req_id, tx_ring->qid);
	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

	ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return (EFAULT);
}

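/*
 * netmap rxsync callback: receive completed frames into the netmap ring
 * and refill the device with fresh buffers released by the application.
 */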
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

	if (ctx.kring->rhead > ctx.lim) {
		/* Probably not needed to release slots from RX ring. */
		return (netmap_ring_reinit(ctx.kring));
	}

	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
		return (0);

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		return (0);

	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
		return (rc);

	ena_netmap_rx_cleanup(&ctx);

	return (0);
}

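/*
 * Receive frames from the device until it reports no more completed
 * descriptors, with ENA_MAX_FRAMES as a safety bound against getting
 * stuck in the loop.
 */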
static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
	int rc = 0;
	int frames_counter = 0;

	ctx->nt = ctx->ring->next_to_clean;
	ctx->nm_i = ctx->kring->nr_hwtail;

	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
		frames_counter++;
		/* In case of multiple frames, it is not an error. */
		rc = 0;
		if (frames_counter > ENA_MAX_FRAMES) {
			ena_log_nm(ctx->adapter->pdev, ERR,
			    "Driver is stuck in the Rx loop\n");
			break;
		}
	}

	ctx->kring->nr_hwtail = ctx->nm_i;
	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
	ctx->ring->next_to_clean = ctx->nt;

	return (rc);
}

static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	enum ena_regs_reset_reason_types reset_reason;
	int rc, len = 0;
	uint16_t buf, nm;

	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
	if (unlikely(rc != 0)) {
		ena_log_nm(ctx->adapter->pdev, ERR,
		    "Failed to read pkt from the device with error: %d\n", rc);
		if (rc == ENA_COM_NO_SPACE) {
			counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
			reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
		} else {
			counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
			reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
		}
		ena_trigger_reset(ctx->adapter, reset_reason);
		return (rc);
	}
	if (unlikely(ena_rx_ctx.descs == 0))
		return (ENA_NETMAP_NO_MORE_FRAMES);

	ena_log_nm(ctx->adapter->pdev, DBG,
	    "Rx: q %d got packet from ena. descs #:"
	    " %d l3 proto %d l4 proto %d hash: %x\n",
	    ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
	    ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
			break;
	/*
	 * ena_netmap_rx_load_desc() doesn't know the number of descriptors.
	 * It just sets the NS_MOREFRAG flag on every slot; the flag of the
	 * last slot is cleared here.
	 */
	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

	if (rc != 0) {
		goto rx_clear_desc;
	}

	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

	counter_enter();
	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
	counter_exit();

	return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
	nm = ctx->nm_i;

	/* Remove failed packet from ring */
	while (buf--) {
		ctx->slots[nm].flags = 0;
		ctx->slots[nm].len = 0;
		nm = nm_prev(nm, ctx->lim);
	}

	return (rc);
}

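/*
 * Hand a single Rx descriptor's buffer over to the netmap slot at
 * ctx->nm_i: unmap it, restore the saved buf_idx and accumulate the
 * fragment length into *len.
 */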
static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
	struct ena_rx_buffer *rx_info;
	uint16_t req_id;

	req_id = ctx->ring->ena_bufs[buf].req_id;
	rx_info = &ctx->ring->rx_buffer_info[req_id];
	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

	ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
	    "Rx idx is not 0.\n");

	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
	rx_info->netmap_buf_idx = 0;
	/*
	 * Set NS_MOREFRAG on every slot.
	 * ena_netmap_rx_frame() then clears it from the last one.
	 */
	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
	*len += ctx->slots[ctx->nm_i].len;
	ctx->ring->free_rx_ids[ctx->nt] = req_id;
	ena_log_nm(ctx->adapter->pdev, DBG,
	    "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info,
	    ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
	    ctx->nm_i);

	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (0);
}

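/*
 * Refill the device with the Rx buffers that the application has
 * released, deriving the refill count from the kring pointers.
 */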
static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
	int refill_required;

	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
		refill_required -= 1;

	if (refill_required == 0)
		return;
	else if (refill_required < 0)
		refill_required += ctx->kring->nkr_num_slots;

	ena_refill_rx_bufs(ctx->ring, refill_required);
}

static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
	ctx->kring = kring;
	ctx->na = kring->na;
	ctx->adapter = if_getsoftc(ctx->na->ifp);
	ctx->lim = kring->nkr_num_slots - 1;
	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
	ctx->slots = kring->ring->slot;
}

void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
	struct netmap_adapter *na = NA(adapter->ifp);

	netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */