1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "ena.h" 34 #include "ena_datapath.h" 35 #ifdef DEV_NETMAP 36 #include "ena_netmap.h" 37 #endif /* DEV_NETMAP */ 38 39 /********************************************************************* 40 * Static functions prototypes 41 *********************************************************************/ 42 43 static int ena_tx_cleanup(struct ena_ring *); 44 static int ena_rx_cleanup(struct ena_ring *); 45 static inline int validate_tx_req_id(struct ena_ring *, uint16_t); 46 static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *, 47 struct mbuf *); 48 static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *, 49 struct ena_com_rx_ctx *, uint16_t *); 50 static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *, 51 struct mbuf *); 52 static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *, bool); 53 static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, 54 struct mbuf **mbuf); 55 static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **); 56 static void ena_start_xmit(struct ena_ring *); 57 58 /********************************************************************* 59 * Global functions 60 *********************************************************************/ 61 62 void 63 ena_cleanup(void *arg, int pending) 64 { 65 struct ena_que *que = arg; 66 struct ena_adapter *adapter = que->adapter; 67 if_t ifp = adapter->ifp; 68 struct ena_ring *tx_ring; 69 struct ena_ring *rx_ring; 70 struct ena_com_io_cq* io_cq; 71 struct ena_eth_io_intr_reg intr_reg; 72 int qid, ena_qid; 73 int txc, rxc, i; 74 75 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 76 return; 77 78 ena_trace(ENA_DBG, "MSI-X TX/RX routine\n"); 79 80 tx_ring = que->tx_ring; 81 rx_ring = que->rx_ring; 82 qid = que->id; 83 ena_qid = ENA_IO_TXQ_IDX(qid); 84 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 85 86 tx_ring->first_interrupt = true; 87 rx_ring->first_interrupt = true; 88 89 for (i = 0; i < CLEAN_BUDGET; ++i) { 90 rxc = ena_rx_cleanup(rx_ring); 91 txc = ena_tx_cleanup(tx_ring); 92 93 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 94 return; 95 96 if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) 97 break; 98 } 99 100 /* Signal that work is done and unmask interrupt */ 101 ena_com_update_intr_reg(&intr_reg, 102 RX_IRQ_INTERVAL, 103 TX_IRQ_INTERVAL, 104 true); 105 ena_com_unmask_intr(io_cq, &intr_reg); 106 } 107 108 void 109 ena_deferred_mq_start(void *arg, int pending) 110 { 111 struct ena_ring *tx_ring = (struct ena_ring *)arg; 112 struct ifnet *ifp = tx_ring->adapter->ifp; 113 114 while (!drbr_empty(ifp, tx_ring->br) && 115 tx_ring->running && 116 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 117 ENA_RING_MTX_LOCK(tx_ring); 118 ena_start_xmit(tx_ring); 119 ENA_RING_MTX_UNLOCK(tx_ring); 120 } 121 } 122 123 int 124 ena_mq_start(if_t ifp, struct mbuf *m) 125 { 126 struct ena_adapter *adapter = ifp->if_softc; 127 struct ena_ring *tx_ring; 128 int ret, is_drbr_empty; 129 uint32_t i; 130 131 if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 132 return (ENODEV); 133 134 /* Which queue to use */ 135 /* 136 * If everything is setup correctly, it should be the 137 * same bucket that the current CPU we're on is. 138 * It should improve performance. 139 */ 140 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 141 i = m->m_pkthdr.flowid % adapter->num_io_queues; 142 } else { 143 i = curcpu % adapter->num_io_queues; 144 } 145 tx_ring = &adapter->tx_ring[i]; 146 147 /* Check if drbr is empty before putting packet */ 148 is_drbr_empty = drbr_empty(ifp, tx_ring->br); 149 ret = drbr_enqueue(ifp, tx_ring->br, m); 150 if (unlikely(ret != 0)) { 151 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 152 return (ret); 153 } 154 155 if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { 156 ena_start_xmit(tx_ring); 157 ENA_RING_MTX_UNLOCK(tx_ring); 158 } else { 159 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 160 } 161 162 return (0); 163 } 164 165 void 166 ena_qflush(if_t ifp) 167 { 168 struct ena_adapter *adapter = ifp->if_softc; 169 struct ena_ring *tx_ring = adapter->tx_ring; 170 int i; 171 172 for(i = 0; i < adapter->num_io_queues; ++i, ++tx_ring) 173 if (!drbr_empty(ifp, tx_ring->br)) { 174 ENA_RING_MTX_LOCK(tx_ring); 175 drbr_flush(ifp, tx_ring->br); 176 ENA_RING_MTX_UNLOCK(tx_ring); 177 } 178 179 if_qflush(ifp); 180 } 181 182 /********************************************************************* 183 * Static functions 184 *********************************************************************/ 185 186 static inline int 187 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) 188 { 189 struct ena_adapter *adapter = tx_ring->adapter; 190 struct ena_tx_buffer *tx_info = NULL; 191 192 if (likely(req_id < tx_ring->ring_size)) { 193 tx_info = &tx_ring->tx_buffer_info[req_id]; 194 if (tx_info->mbuf != NULL) 195 return (0); 196 device_printf(adapter->pdev, 197 "tx_info doesn't have valid mbuf\n"); 198 } 199 200 device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); 201 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); 202 203 /* Trigger device reset */ 204 ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID); 205 206 return (EFAULT); 207 } 208 209 /** 210 * ena_tx_cleanup - clear sent packets and corresponding descriptors 211 * @tx_ring: ring for which we want to clean packets 212 * 213 * Once packets are sent, we ask the device in a loop for no longer used 214 * descriptors. We find the related mbuf chain in a map (index in an array) 215 * and free it, then update ring state. 216 * This is performed in "endless" loop, updating ring pointers every 217 * TX_COMMIT. The first check of free descriptor is performed before the actual 218 * loop, then repeated at the loop end. 219 **/ 220 static int 221 ena_tx_cleanup(struct ena_ring *tx_ring) 222 { 223 struct ena_adapter *adapter; 224 struct ena_com_io_cq* io_cq; 225 uint16_t next_to_clean; 226 uint16_t req_id; 227 uint16_t ena_qid; 228 unsigned int total_done = 0; 229 int rc; 230 int commit = TX_COMMIT; 231 int budget = TX_BUDGET; 232 int work_done; 233 bool above_thresh; 234 235 adapter = tx_ring->que->adapter; 236 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 237 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 238 next_to_clean = tx_ring->next_to_clean; 239 240 #ifdef DEV_NETMAP 241 if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS) 242 return (0); 243 #endif /* DEV_NETMAP */ 244 245 do { 246 struct ena_tx_buffer *tx_info; 247 struct mbuf *mbuf; 248 249 rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); 250 if (unlikely(rc != 0)) 251 break; 252 253 rc = validate_tx_req_id(tx_ring, req_id); 254 if (unlikely(rc != 0)) 255 break; 256 257 tx_info = &tx_ring->tx_buffer_info[req_id]; 258 259 mbuf = tx_info->mbuf; 260 261 tx_info->mbuf = NULL; 262 bintime_clear(&tx_info->timestamp); 263 264 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 265 BUS_DMASYNC_POSTWRITE); 266 bus_dmamap_unload(adapter->tx_buf_tag, 267 tx_info->dmamap); 268 269 ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", 270 tx_ring->qid, mbuf); 271 272 m_freem(mbuf); 273 274 total_done += tx_info->tx_descs; 275 276 tx_ring->free_tx_ids[next_to_clean] = req_id; 277 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 278 tx_ring->ring_size); 279 280 if (unlikely(--commit == 0)) { 281 commit = TX_COMMIT; 282 /* update ring state every TX_COMMIT descriptor */ 283 tx_ring->next_to_clean = next_to_clean; 284 ena_com_comp_ack( 285 &adapter->ena_dev->io_sq_queues[ena_qid], 286 total_done); 287 ena_com_update_dev_comp_head(io_cq); 288 total_done = 0; 289 } 290 } while (likely(--budget)); 291 292 work_done = TX_BUDGET - budget; 293 294 ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n", 295 tx_ring->qid, work_done); 296 297 /* If there is still something to commit update ring state */ 298 if (likely(commit != TX_COMMIT)) { 299 tx_ring->next_to_clean = next_to_clean; 300 ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], 301 total_done); 302 ena_com_update_dev_comp_head(io_cq); 303 } 304 305 /* 306 * Need to make the rings circular update visible to 307 * ena_xmit_mbuf() before checking for tx_ring->running. 308 */ 309 mb(); 310 311 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 312 ENA_TX_RESUME_THRESH); 313 if (unlikely(!tx_ring->running && above_thresh)) { 314 ENA_RING_MTX_LOCK(tx_ring); 315 above_thresh = 316 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 317 ENA_TX_RESUME_THRESH); 318 if (!tx_ring->running && above_thresh) { 319 tx_ring->running = true; 320 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 321 taskqueue_enqueue(tx_ring->enqueue_tq, 322 &tx_ring->enqueue_task); 323 } 324 ENA_RING_MTX_UNLOCK(tx_ring); 325 } 326 327 return (work_done); 328 } 329 330 static void 331 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 332 struct mbuf *mbuf) 333 { 334 struct ena_adapter *adapter = rx_ring->adapter; 335 336 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 337 mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; 338 339 if (ena_rx_ctx->frag && 340 (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { 341 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 342 return; 343 } 344 345 switch (ena_rx_ctx->l3_proto) { 346 case ENA_ETH_IO_L3_PROTO_IPV4: 347 switch (ena_rx_ctx->l4_proto) { 348 case ENA_ETH_IO_L4_PROTO_TCP: 349 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); 350 break; 351 case ENA_ETH_IO_L4_PROTO_UDP: 352 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); 353 break; 354 default: 355 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); 356 } 357 break; 358 case ENA_ETH_IO_L3_PROTO_IPV6: 359 switch (ena_rx_ctx->l4_proto) { 360 case ENA_ETH_IO_L4_PROTO_TCP: 361 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); 362 break; 363 case ENA_ETH_IO_L4_PROTO_UDP: 364 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); 365 break; 366 default: 367 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); 368 } 369 break; 370 case ENA_ETH_IO_L3_PROTO_UNKNOWN: 371 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 372 break; 373 default: 374 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 375 } 376 } else { 377 mbuf->m_pkthdr.flowid = rx_ring->qid; 378 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 379 } 380 } 381 382 /** 383 * ena_rx_mbuf - assemble mbuf from descriptors 384 * @rx_ring: ring for which we want to clean packets 385 * @ena_bufs: buffer info 386 * @ena_rx_ctx: metadata for this packet(s) 387 * @next_to_clean: ring pointer, will be updated only upon success 388 * 389 **/ 390 static struct mbuf* 391 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, 392 struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) 393 { 394 struct mbuf *mbuf; 395 struct ena_rx_buffer *rx_info; 396 struct ena_adapter *adapter; 397 unsigned int descs = ena_rx_ctx->descs; 398 int rc; 399 uint16_t ntc, len, req_id, buf = 0; 400 401 ntc = *next_to_clean; 402 adapter = rx_ring->adapter; 403 404 len = ena_bufs[buf].len; 405 req_id = ena_bufs[buf].req_id; 406 rc = validate_rx_req_id(rx_ring, req_id); 407 if (unlikely(rc != 0)) 408 return (NULL); 409 410 rx_info = &rx_ring->rx_buffer_info[req_id]; 411 if (unlikely(rx_info->mbuf == NULL)) { 412 device_printf(adapter->pdev, "NULL mbuf in rx_info"); 413 return (NULL); 414 } 415 416 ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", 417 rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr); 418 419 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 420 BUS_DMASYNC_POSTREAD); 421 mbuf = rx_info->mbuf; 422 mbuf->m_flags |= M_PKTHDR; 423 mbuf->m_pkthdr.len = len; 424 mbuf->m_len = len; 425 mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp; 426 427 /* Fill mbuf with hash key and it's interpretation for optimization */ 428 ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); 429 430 ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", 431 mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); 432 433 /* DMA address is not needed anymore, unmap it */ 434 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 435 436 rx_info->mbuf = NULL; 437 rx_ring->free_rx_ids[ntc] = req_id; 438 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 439 440 /* 441 * While we have more than 1 descriptors for one rcvd packet, append 442 * other mbufs to the main one 443 */ 444 while (--descs) { 445 ++buf; 446 len = ena_bufs[buf].len; 447 req_id = ena_bufs[buf].req_id; 448 rc = validate_rx_req_id(rx_ring, req_id); 449 if (unlikely(rc != 0)) { 450 /* 451 * If the req_id is invalid, then the device will be 452 * reset. In that case we must free all mbufs that 453 * were already gathered. 454 */ 455 m_freem(mbuf); 456 return (NULL); 457 } 458 rx_info = &rx_ring->rx_buffer_info[req_id]; 459 460 if (unlikely(rx_info->mbuf == NULL)) { 461 device_printf(adapter->pdev, "NULL mbuf in rx_info"); 462 /* 463 * If one of the required mbufs was not allocated yet, 464 * we can break there. 465 * All earlier used descriptors will be reallocated 466 * later and not used mbufs can be reused. 467 * The next_to_clean pointer will not be updated in case 468 * of an error, so caller should advance it manually 469 * in error handling routine to keep it up to date 470 * with hw ring. 471 */ 472 m_freem(mbuf); 473 return (NULL); 474 } 475 476 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 477 BUS_DMASYNC_POSTREAD); 478 if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { 479 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 480 ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n", 481 mbuf); 482 } 483 484 ena_trace(ENA_DBG | ENA_RXPTH, 485 "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len); 486 487 /* Free already appended mbuf, it won't be useful anymore */ 488 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 489 m_freem(rx_info->mbuf); 490 rx_info->mbuf = NULL; 491 492 rx_ring->free_rx_ids[ntc] = req_id; 493 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 494 } 495 496 *next_to_clean = ntc; 497 498 return (mbuf); 499 } 500 501 /** 502 * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum 503 **/ 504 static inline void 505 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 506 struct mbuf *mbuf) 507 { 508 509 /* if IP and error */ 510 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 511 ena_rx_ctx->l3_csum_err)) { 512 /* ipv4 checksum error */ 513 mbuf->m_pkthdr.csum_flags = 0; 514 counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 515 ena_trace(ENA_DBG, "RX IPv4 header checksum error\n"); 516 return; 517 } 518 519 /* if TCP/UDP */ 520 if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 521 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { 522 if (ena_rx_ctx->l4_csum_err) { 523 /* TCP/UDP checksum error */ 524 mbuf->m_pkthdr.csum_flags = 0; 525 counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 526 ena_trace(ENA_DBG, "RX L4 checksum error\n"); 527 } else { 528 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 529 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 530 } 531 } 532 } 533 534 /** 535 * ena_rx_cleanup - handle rx irq 536 * @arg: ring for which irq is being handled 537 **/ 538 static int 539 ena_rx_cleanup(struct ena_ring *rx_ring) 540 { 541 struct ena_adapter *adapter; 542 struct mbuf *mbuf; 543 struct ena_com_rx_ctx ena_rx_ctx; 544 struct ena_com_io_cq* io_cq; 545 struct ena_com_io_sq* io_sq; 546 if_t ifp; 547 uint16_t ena_qid; 548 uint16_t next_to_clean; 549 uint32_t refill_required; 550 uint32_t refill_threshold; 551 uint32_t do_if_input = 0; 552 unsigned int qid; 553 int rc, i; 554 int budget = RX_BUDGET; 555 #ifdef DEV_NETMAP 556 int done; 557 #endif /* DEV_NETMAP */ 558 559 adapter = rx_ring->que->adapter; 560 ifp = adapter->ifp; 561 qid = rx_ring->que->id; 562 ena_qid = ENA_IO_RXQ_IDX(qid); 563 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 564 io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 565 next_to_clean = rx_ring->next_to_clean; 566 567 #ifdef DEV_NETMAP 568 if (netmap_rx_irq(adapter->ifp, rx_ring->qid, &done) != NM_IRQ_PASS) 569 return (0); 570 #endif /* DEV_NETMAP */ 571 572 ena_trace(ENA_DBG, "rx: qid %d\n", qid); 573 574 do { 575 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 576 ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; 577 ena_rx_ctx.descs = 0; 578 bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 579 io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); 580 rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); 581 582 if (unlikely(rc != 0)) 583 goto error; 584 585 if (unlikely(ena_rx_ctx.descs == 0)) 586 break; 587 588 ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " 589 "descs #: %d l3 proto %d l4 proto %d hash: %x\n", 590 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 591 ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 592 593 /* Receive mbuf from the ring */ 594 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, 595 &ena_rx_ctx, &next_to_clean); 596 bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 597 io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); 598 /* Exit if we failed to retrieve a buffer */ 599 if (unlikely(mbuf == NULL)) { 600 for (i = 0; i < ena_rx_ctx.descs; ++i) { 601 rx_ring->free_rx_ids[next_to_clean] = 602 rx_ring->ena_bufs[i].req_id; 603 next_to_clean = 604 ENA_RX_RING_IDX_NEXT(next_to_clean, 605 rx_ring->ring_size); 606 607 } 608 break; 609 } 610 611 if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || 612 ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { 613 ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); 614 } 615 616 counter_enter(); 617 counter_u64_add_protected(rx_ring->rx_stats.bytes, 618 mbuf->m_pkthdr.len); 619 counter_u64_add_protected(adapter->hw_stats.rx_bytes, 620 mbuf->m_pkthdr.len); 621 counter_exit(); 622 /* 623 * LRO is only for IP/TCP packets and TCP checksum of the packet 624 * should be computed by hardware. 625 */ 626 do_if_input = 1; 627 if (((ifp->if_capenable & IFCAP_LRO) != 0) && 628 ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && 629 (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { 630 /* 631 * Send to the stack if: 632 * - LRO not enabled, or 633 * - no LRO resources, or 634 * - lro enqueue fails 635 */ 636 if ((rx_ring->lro.lro_cnt != 0) && 637 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) 638 do_if_input = 0; 639 } 640 if (do_if_input != 0) { 641 ena_trace(ENA_DBG | ENA_RXPTH, 642 "calling if_input() with mbuf %p\n", mbuf); 643 (*ifp->if_input)(ifp, mbuf); 644 } 645 646 counter_enter(); 647 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); 648 counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); 649 counter_exit(); 650 } while (--budget); 651 652 rx_ring->next_to_clean = next_to_clean; 653 654 refill_required = ena_com_free_q_entries(io_sq); 655 refill_threshold = min_t(int, 656 rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 657 ENA_RX_REFILL_THRESH_PACKET); 658 659 if (refill_required > refill_threshold) { 660 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 661 ena_refill_rx_bufs(rx_ring, refill_required); 662 } 663 664 tcp_lro_flush_all(&rx_ring->lro); 665 666 return (RX_BUDGET - budget); 667 668 error: 669 counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); 670 671 /* Too many desc from the device. Trigger reset */ 672 ena_trigger_reset(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); 673 674 return (0); 675 } 676 677 static void 678 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf, 679 bool disable_meta_caching) 680 { 681 struct ena_com_tx_meta *ena_meta; 682 struct ether_vlan_header *eh; 683 struct mbuf *mbuf_next; 684 u32 mss; 685 bool offload; 686 uint16_t etype; 687 int ehdrlen; 688 struct ip *ip; 689 int iphlen; 690 struct tcphdr *th; 691 int offset; 692 693 offload = false; 694 ena_meta = &ena_tx_ctx->ena_meta; 695 mss = mbuf->m_pkthdr.tso_segsz; 696 697 if (mss != 0) 698 offload = true; 699 700 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) 701 offload = true; 702 703 if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) 704 offload = true; 705 706 if (!offload) { 707 if (disable_meta_caching) { 708 memset(ena_meta, 0, sizeof(*ena_meta)); 709 ena_tx_ctx->meta_valid = 1; 710 } else { 711 ena_tx_ctx->meta_valid = 0; 712 } 713 return; 714 } 715 716 /* Determine where frame payload starts. */ 717 eh = mtod(mbuf, struct ether_vlan_header *); 718 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 719 etype = ntohs(eh->evl_proto); 720 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 721 } else { 722 etype = ntohs(eh->evl_encap_proto); 723 ehdrlen = ETHER_HDR_LEN; 724 } 725 726 mbuf_next = m_getptr(mbuf, ehdrlen, &offset); 727 ip = (struct ip *)(mtodo(mbuf_next, offset)); 728 iphlen = ip->ip_hl << 2; 729 730 mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset); 731 th = (struct tcphdr *)(mtodo(mbuf_next, offset)); 732 733 if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) { 734 ena_tx_ctx->l3_csum_enable = 1; 735 } 736 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 737 ena_tx_ctx->tso_enable = 1; 738 ena_meta->l4_hdr_len = (th->th_off); 739 } 740 741 switch (etype) { 742 case ETHERTYPE_IP: 743 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 744 if ((ip->ip_off & htons(IP_DF)) != 0) 745 ena_tx_ctx->df = 1; 746 break; 747 case ETHERTYPE_IPV6: 748 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 749 750 default: 751 break; 752 } 753 754 if (ip->ip_p == IPPROTO_TCP) { 755 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 756 if ((mbuf->m_pkthdr.csum_flags & 757 (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0) 758 ena_tx_ctx->l4_csum_enable = 1; 759 else 760 ena_tx_ctx->l4_csum_enable = 0; 761 } else if (ip->ip_p == IPPROTO_UDP) { 762 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 763 if ((mbuf->m_pkthdr.csum_flags & 764 (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0) 765 ena_tx_ctx->l4_csum_enable = 1; 766 else 767 ena_tx_ctx->l4_csum_enable = 0; 768 } else { 769 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 770 ena_tx_ctx->l4_csum_enable = 0; 771 } 772 773 ena_meta->mss = mss; 774 ena_meta->l3_hdr_len = iphlen; 775 ena_meta->l3_hdr_offset = ehdrlen; 776 ena_tx_ctx->meta_valid = 1; 777 } 778 779 static int 780 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 781 { 782 struct ena_adapter *adapter; 783 struct mbuf *collapsed_mbuf; 784 int num_frags; 785 786 adapter = tx_ring->adapter; 787 num_frags = ena_mbuf_count(*mbuf); 788 789 /* One segment must be reserved for configuration descriptor. */ 790 if (num_frags < adapter->max_tx_sgl_size) 791 return (0); 792 counter_u64_add(tx_ring->tx_stats.collapse, 1); 793 794 collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, 795 adapter->max_tx_sgl_size - 1); 796 if (unlikely(collapsed_mbuf == NULL)) { 797 counter_u64_add(tx_ring->tx_stats.collapse_err, 1); 798 return (ENOMEM); 799 } 800 801 /* If mbuf was collapsed succesfully, original mbuf is released. */ 802 *mbuf = collapsed_mbuf; 803 804 return (0); 805 } 806 807 static int 808 ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, 809 struct mbuf *mbuf, void **push_hdr, u16 *header_len) 810 { 811 struct ena_adapter *adapter = tx_ring->adapter; 812 struct ena_com_buf *ena_buf; 813 bus_dma_segment_t segs[ENA_BUS_DMA_SEGS]; 814 size_t iseg = 0; 815 uint32_t mbuf_head_len; 816 uint16_t offset; 817 int rc, nsegs; 818 819 mbuf_head_len = mbuf->m_len; 820 tx_info->mbuf = mbuf; 821 ena_buf = tx_info->bufs; 822 823 /* 824 * For easier maintaining of the DMA map, map the whole mbuf even if 825 * the LLQ is used. The descriptors will be filled using the segments. 826 */ 827 rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap, mbuf, 828 segs, &nsegs, BUS_DMA_NOWAIT); 829 if (unlikely((rc != 0) || (nsegs == 0))) { 830 ena_trace(ENA_WARNING, 831 "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs); 832 goto dma_error; 833 } 834 835 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 836 /* 837 * When the device is LLQ mode, the driver will copy 838 * the header into the device memory space. 839 * the ena_com layer assumes the header is in a linear 840 * memory space. 841 * This assumption might be wrong since part of the header 842 * can be in the fragmented buffers. 843 * First check if header fits in the mbuf. If not, copy it to 844 * separate buffer that will be holding linearized data. 845 */ 846 *header_len = min_t(uint32_t, mbuf->m_pkthdr.len, tx_ring->tx_max_header_size); 847 848 /* If header is in linear space, just point into mbuf's data. */ 849 if (likely(*header_len <= mbuf_head_len)) { 850 *push_hdr = mbuf->m_data; 851 /* 852 * Otherwise, copy whole portion of header from multiple mbufs 853 * to intermediate buffer. 854 */ 855 } else { 856 m_copydata(mbuf, 0, *header_len, tx_ring->push_buf_intermediate_buf); 857 *push_hdr = tx_ring->push_buf_intermediate_buf; 858 859 counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); 860 } 861 862 ena_trace(ENA_DBG | ENA_TXPTH, 863 "mbuf: %p header_buf->vaddr: %p push_len: %d\n", 864 mbuf, *push_hdr, *header_len); 865 866 /* If packet is fitted in LLQ header, no need for DMA segments. */ 867 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) { 868 return (0); 869 } else { 870 offset = tx_ring->tx_max_header_size; 871 /* 872 * As Header part is mapped to LLQ header, we can skip it and just 873 * map the residuum of the mbuf to DMA Segments. 874 */ 875 while (offset > 0) { 876 if (offset >= segs[iseg].ds_len) { 877 offset -= segs[iseg].ds_len; 878 } else { 879 ena_buf->paddr = segs[iseg].ds_addr + offset; 880 ena_buf->len = segs[iseg].ds_len - offset; 881 ena_buf++; 882 tx_info->num_of_bufs++; 883 offset = 0; 884 } 885 iseg++; 886 } 887 } 888 } else { 889 *push_hdr = NULL; 890 /* 891 * header_len is just a hint for the device. Because FreeBSD is not 892 * giving us information about packet header length and it is not 893 * guaranteed that all packet headers will be in the 1st mbuf, setting 894 * header_len to 0 is making the device ignore this value and resolve 895 * header on it's own. 896 */ 897 *header_len = 0; 898 } 899 900 /* Map rest of the mbuf */ 901 while (iseg < nsegs) { 902 ena_buf->paddr = segs[iseg].ds_addr; 903 ena_buf->len = segs[iseg].ds_len; 904 ena_buf++; 905 iseg++; 906 tx_info->num_of_bufs++; 907 } 908 909 return (0); 910 911 dma_error: 912 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); 913 tx_info->mbuf = NULL; 914 return (rc); 915 } 916 917 static int 918 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 919 { 920 struct ena_adapter *adapter; 921 struct ena_tx_buffer *tx_info; 922 struct ena_com_tx_ctx ena_tx_ctx; 923 struct ena_com_dev *ena_dev; 924 struct ena_com_io_sq* io_sq; 925 void *push_hdr; 926 uint16_t next_to_use; 927 uint16_t req_id; 928 uint16_t ena_qid; 929 uint16_t header_len; 930 int rc; 931 int nb_hw_desc; 932 933 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 934 adapter = tx_ring->que->adapter; 935 ena_dev = adapter->ena_dev; 936 io_sq = &ena_dev->io_sq_queues[ena_qid]; 937 938 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); 939 if (unlikely(rc != 0)) { 940 ena_trace(ENA_WARNING, 941 "Failed to collapse mbuf! err: %d\n", rc); 942 return (rc); 943 } 944 945 ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len); 946 947 next_to_use = tx_ring->next_to_use; 948 req_id = tx_ring->free_tx_ids[next_to_use]; 949 tx_info = &tx_ring->tx_buffer_info[req_id]; 950 tx_info->num_of_bufs = 0; 951 952 rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); 953 if (unlikely(rc != 0)) { 954 ena_trace(ENA_WARNING, "Failed to map TX mbuf\n"); 955 return (rc); 956 } 957 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 958 ena_tx_ctx.ena_bufs = tx_info->bufs; 959 ena_tx_ctx.push_header = push_hdr; 960 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 961 ena_tx_ctx.req_id = req_id; 962 ena_tx_ctx.header_len = header_len; 963 964 /* Set flags and meta data */ 965 ena_tx_csum(&ena_tx_ctx, *mbuf, adapter->disable_meta_caching); 966 967 if (tx_ring->acum_pkts == DB_THRESHOLD || 968 ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) { 969 ena_trace(ENA_DBG | ENA_TXPTH, 970 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", 971 tx_ring->que->id); 972 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 973 counter_u64_add(tx_ring->tx_stats.doorbells, 1); 974 tx_ring->acum_pkts = 0; 975 } 976 977 /* Prepare the packet's descriptors and send them to device */ 978 rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); 979 if (unlikely(rc != 0)) { 980 if (likely(rc == ENA_COM_NO_MEM)) { 981 ena_trace(ENA_DBG | ENA_TXPTH, 982 "tx ring[%d] if out of space\n", tx_ring->que->id); 983 } else { 984 device_printf(adapter->pdev, 985 "failed to prepare tx bufs\n"); 986 } 987 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1); 988 goto dma_error; 989 } 990 991 counter_enter(); 992 counter_u64_add_protected(tx_ring->tx_stats.cnt, 1); 993 counter_u64_add_protected(tx_ring->tx_stats.bytes, 994 (*mbuf)->m_pkthdr.len); 995 996 counter_u64_add_protected(adapter->hw_stats.tx_packets, 1); 997 counter_u64_add_protected(adapter->hw_stats.tx_bytes, 998 (*mbuf)->m_pkthdr.len); 999 counter_exit(); 1000 1001 tx_info->tx_descs = nb_hw_desc; 1002 getbinuptime(&tx_info->timestamp); 1003 tx_info->print_once = true; 1004 1005 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 1006 tx_ring->ring_size); 1007 1008 /* stop the queue when no more space available, the packet can have up 1009 * to sgl_size + 2. one for the meta descriptor and one for header 1010 * (if the header is larger than tx_max_header_size). 1011 */ 1012 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 1013 adapter->max_tx_sgl_size + 2))) { 1014 ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", 1015 tx_ring->que->id); 1016 1017 tx_ring->running = false; 1018 counter_u64_add(tx_ring->tx_stats.queue_stop, 1); 1019 1020 /* There is a rare condition where this function decides to 1021 * stop the queue but meanwhile tx_cleanup() updates 1022 * next_to_completion and terminates. 1023 * The queue will remain stopped forever. 1024 * To solve this issue this function performs mb(), checks 1025 * the wakeup condition and wakes up the queue if needed. 1026 */ 1027 mb(); 1028 1029 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 1030 ENA_TX_RESUME_THRESH)) { 1031 tx_ring->running = true; 1032 counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 1033 } 1034 } 1035 1036 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1037 BUS_DMASYNC_PREWRITE); 1038 1039 return (0); 1040 1041 dma_error: 1042 tx_info->mbuf = NULL; 1043 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 1044 1045 return (rc); 1046 } 1047 1048 static void 1049 ena_start_xmit(struct ena_ring *tx_ring) 1050 { 1051 struct mbuf *mbuf; 1052 struct ena_adapter *adapter = tx_ring->adapter; 1053 struct ena_com_io_sq* io_sq; 1054 int ena_qid; 1055 int ret = 0; 1056 1057 if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 1058 return; 1059 1060 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))) 1061 return; 1062 1063 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 1064 io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 1065 1066 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { 1067 ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" 1068 " header csum flags %#jx\n", 1069 mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); 1070 1071 if (unlikely(!tx_ring->running)) { 1072 drbr_putback(adapter->ifp, tx_ring->br, mbuf); 1073 break; 1074 } 1075 1076 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { 1077 if (ret == ENA_COM_NO_MEM) { 1078 drbr_putback(adapter->ifp, tx_ring->br, mbuf); 1079 } else if (ret == ENA_COM_NO_SPACE) { 1080 drbr_putback(adapter->ifp, tx_ring->br, mbuf); 1081 } else { 1082 m_freem(mbuf); 1083 drbr_advance(adapter->ifp, tx_ring->br); 1084 } 1085 1086 break; 1087 } 1088 1089 drbr_advance(adapter->ifp, tx_ring->br); 1090 1091 if (unlikely((if_getdrvflags(adapter->ifp) & 1092 IFF_DRV_RUNNING) == 0)) 1093 return; 1094 1095 tx_ring->acum_pkts++; 1096 1097 BPF_MTAP(adapter->ifp, mbuf); 1098 } 1099 1100 if (likely(tx_ring->acum_pkts != 0)) { 1101 /* Trigger the dma engine */ 1102 ena_com_write_sq_doorbell(io_sq); 1103 counter_u64_add(tx_ring->tx_stats.doorbells, 1); 1104 tx_ring->acum_pkts = 0; 1105 } 1106 1107 if (unlikely(!tx_ring->running)) 1108 taskqueue_enqueue(tx_ring->que->cleanup_tq, 1109 &tx_ring->que->cleanup_task); 1110 } 1111