171d10453SEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */ 2015f8cc5SEric Joyner /* Copyright (c) 2024, Intel Corporation 371d10453SEric Joyner * All rights reserved. 471d10453SEric Joyner * 571d10453SEric Joyner * Redistribution and use in source and binary forms, with or without 671d10453SEric Joyner * modification, are permitted provided that the following conditions are met: 771d10453SEric Joyner * 871d10453SEric Joyner * 1. Redistributions of source code must retain the above copyright notice, 971d10453SEric Joyner * this list of conditions and the following disclaimer. 1071d10453SEric Joyner * 1171d10453SEric Joyner * 2. Redistributions in binary form must reproduce the above copyright 1271d10453SEric Joyner * notice, this list of conditions and the following disclaimer in the 1371d10453SEric Joyner * documentation and/or other materials provided with the distribution. 1471d10453SEric Joyner * 1571d10453SEric Joyner * 3. Neither the name of the Intel Corporation nor the names of its 1671d10453SEric Joyner * contributors may be used to endorse or promote products derived from 1771d10453SEric Joyner * this software without specific prior written permission. 1871d10453SEric Joyner * 1971d10453SEric Joyner * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 2071d10453SEric Joyner * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2171d10453SEric Joyner * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2271d10453SEric Joyner * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 2371d10453SEric Joyner * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 2471d10453SEric Joyner * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 2571d10453SEric Joyner * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 2671d10453SEric Joyner * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 2771d10453SEric Joyner * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 2871d10453SEric Joyner * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 2971d10453SEric Joyner * POSSIBILITY OF SUCH DAMAGE. 3071d10453SEric Joyner */ 3171d10453SEric Joyner 3271d10453SEric Joyner /** 3371d10453SEric Joyner * @file ice_iflib_txrx.c 3471d10453SEric Joyner * @brief iflib Tx/Rx hotpath 3571d10453SEric Joyner * 3671d10453SEric Joyner * Main location for the iflib Tx/Rx hotpath implementation. 3771d10453SEric Joyner * 3871d10453SEric Joyner * Contains the implementation for the iflib function callbacks and the 3971d10453SEric Joyner * if_txrx ops structure. 
4071d10453SEric Joyner */ 4171d10453SEric Joyner 4271d10453SEric Joyner #include "ice_iflib.h" 4371d10453SEric Joyner 4471d10453SEric Joyner /* Tx/Rx hotpath utility functions */ 4571d10453SEric Joyner #include "ice_common_txrx.h" 4671d10453SEric Joyner 4771d10453SEric Joyner /* 4871d10453SEric Joyner * iflib txrx method declarations 4971d10453SEric Joyner */ 5071d10453SEric Joyner static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi); 5171d10453SEric Joyner static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri); 5271d10453SEric Joyner static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); 5371d10453SEric Joyner static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear); 5471d10453SEric Joyner static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget); 5571d10453SEric Joyner static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx); 5671d10453SEric Joyner static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru); 578923de59SPiotr Kubaj static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi); 5871d10453SEric Joyner 5971d10453SEric Joyner /* Macro to help extract the NIC mode flexible Rx descriptor fields from the 6071d10453SEric Joyner * advanced 32byte Rx descriptors. 6171d10453SEric Joyner */ 6271d10453SEric Joyner #define RX_FLEX_NIC(desc, field) \ 6371d10453SEric Joyner (((struct ice_32b_rx_flex_desc_nic *)desc)->field) 6471d10453SEric Joyner 6571d10453SEric Joyner /** 6671d10453SEric Joyner * @var ice_txrx 6771d10453SEric Joyner * @brief Tx/Rx operations for the iflib stack 6871d10453SEric Joyner * 6971d10453SEric Joyner * Structure defining the Tx and Rx related operations that iflib can request 7071d10453SEric Joyner * the driver to perform. These are the main entry points for the hot path of 7171d10453SEric Joyner * the transmit and receive paths in the iflib driver. 
 */
struct if_txrx ice_txrx = {
	.ift_txd_encap = ice_ift_txd_encap,
	.ift_txd_flush = ice_ift_txd_flush,
	.ift_txd_credits_update = ice_ift_txd_credits_update,
	.ift_rxd_available = ice_ift_rxd_available,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
	.ift_rxd_refill = ice_ift_rxd_refill,
	.ift_rxd_flush = ice_ift_rxd_flush,
	/* v2 variant: selection sees the parsed packet info, not just the mbuf */
	.ift_txq_select_v2 = ice_ift_queue_select,
};

/**
 * ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @arg: the iflib softc structure pointer
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Return 0 on success, non-zero error code on failure.
 */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct ice_tx_desc *txd = NULL;
	int i, j, mask, pidx_last;
	u32 cmd, off;	/* QW1 command and offset fields shared by all data descriptors */

	cmd = off = 0;
	i = pi->ipi_pidx;

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			/* A sparse mbuf chain would overrun the hardware
			 * segment limits; ask iflib to defragment (EFBIG).
			 */
			if (ice_tso_detect_sparse(pi))
				return (EFBIG);
			/* ice_tso_setup() consumes a context descriptor and
			 * returns the next free descriptor index.
			 */
			i = ice_tso_setup(txq, pi);
		}
		ice_tx_setup_offload(txq, pi, &cmd, &off);
	}
	/* Hardware inserts the VLAN tag from L2TAG1 when IL2TAG1 is set */
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= ICE_TX_DESC_CMD_IL2TAG1;

	/* Ring indices wrap by masking; relies on desc_count being a power of two */
	mask = txq->desc_count - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txq->tx_base[i];
		seglen = segs[j].ds_len;

		/* Pack QW1: descriptor type, command flags, header offsets,
		 * buffer size, and VLAN tag, each in its own bit field.
		 */
		txd->buf_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(ICE_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << ICE_TXD_QW1_CMD_S)
		    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
		    | ((u64)seglen  << ICE_TXD_QW1_TX_BUF_SZ_S)
	            | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));

		txq->stats.tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}

	/* Set the last descriptor for report: EOP marks end of packet, RS
	 * requests a writeback so completion can be detected later.
	 */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));

	/* Add to report status array, consumed by ice_ift_txd_credits_update() */
	txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
	txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
	/* rs queue must never fill completely (producer may not catch consumer) */
	MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);

	/* Tell iflib where the next free descriptor is */
	pi->ipi_new_pidx = i;

	++txq->stats.tx_packets;
	return (0);
}

/**
 * ice_ift_txd_flush - Flush Tx descriptors to hardware
 * @arg: device specific softc pointer
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
 * frames are available for transmit.
16771d10453SEric Joyner */ 16871d10453SEric Joyner static void 16971d10453SEric Joyner ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) 17071d10453SEric Joyner { 17171d10453SEric Joyner struct ice_softc *sc = (struct ice_softc *)arg; 17271d10453SEric Joyner struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid]; 17371d10453SEric Joyner struct ice_hw *hw = &sc->hw; 17471d10453SEric Joyner 17571d10453SEric Joyner wr32(hw, txq->tail, pidx); 17671d10453SEric Joyner } 17771d10453SEric Joyner 17871d10453SEric Joyner /** 17971d10453SEric Joyner * ice_ift_txd_credits_update - cleanup Tx descriptors 18071d10453SEric Joyner * @arg: device private softc 18171d10453SEric Joyner * @txqid: the Tx queue to update 18271d10453SEric Joyner * @clear: if false, only report, do not actually clean 18371d10453SEric Joyner * 18471d10453SEric Joyner * If clear is false, iflib is asking if we *could* clean up any Tx 18571d10453SEric Joyner * descriptors. 18671d10453SEric Joyner * 18771d10453SEric Joyner * If clear is true, iflib is requesting to cleanup and reclaim used Tx 18871d10453SEric Joyner * descriptors. 
 */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	/* The rs queue (tx_rsq) records the last descriptor index of each
	 * transmitted packet; empty rs queue means nothing to clean.
	 */
	rs_cidx = txq->tx_rs_cidx;
	if (rs_cidx == txq->tx_rs_pidx)
		return (0);
	cur = txq->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);

	if (!is_done)
		return (0);
	/* Report-only query: any non-zero answer means "yes, cleanable" */
	else if (clear == false)
		return (1);

	prev = txq->tx_cidx_processed;
	ntxd = txq->desc_count;
	do {
		MPASS(prev != cur);
		/* Descriptors consumed since the previous completion point;
		 * negative delta means the ring index wrapped.
		 */
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		/* Mask-wrap relies on desc_count being a power of two */
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txq->tx_rs_pidx)
			break;
		cur = txq->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
	} while (is_done);

	/* Record how far cleaning progressed for the next invocation */
	txq->tx_rs_cidx = rs_cidx;
	txq->tx_cidx_processed = prev;

	return (processed);
}

/**
 * ice_ift_rxd_available - Return number of available Rx packets
 * @arg: device private softc
 * @rxqid: the Rx queue id
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
 */
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
	union ice_32b_rx_flex_desc *rxd;
	uint16_t status0;
	int cnt, i, nrxd;

	nrxd = rxq->desc_count;

	/* cnt counts completed packets (EOF descriptors), not descriptors
	 * scanned; the scan stops at the first descriptor hardware has not
	 * written back (DD clear).
	 */
	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
		rxd = &rxq->rx_base[i];
		status0 = le16toh(rxd->wb.status_error0);

		if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		/* Only the last descriptor of a packet carries EOF */
		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
			cnt++;
	}

	return (cnt);
}

/**
 * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
 * @arg: device specific softc
 * @ri: receive packet info
 *
 * This function is called by iflib, and executes in ithread context. It is
 * called by iflib to obtain data which has been DMA'ed into host memory.
 * Returns zero on success, and EBADMSG on failure.
 */
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	if_softc_ctx_t scctx = sc->scctx;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
	union ice_32b_rx_flex_desc *cur;
	u16 status0, plen, ptype;
	bool eop;
	size_t cidx;
	int i;

	cidx = ri->iri_cidx;
	i = 0;
	/* Walk the descriptors making up one packet, one fragment per
	 * descriptor, until the End-of-Frame descriptor is reached.
	 */
	do {
		/* 5 descriptor receive limit */
		MPASS(i < ICE_MAX_RX_SEGS);

		cur = &rxq->rx_base[cidx];
		status0 = le16toh(cur->wb.status_error0);
		plen = le16toh(cur->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* we should never be called without a valid descriptor */
		MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);

		/* NOTE(review): iri_len is accumulated, not assigned — assumes
		 * iflib zeroes it before this call; verify against iflib.
		 */
		ri->iri_len += plen;

		/* Clear the writeback status so this descriptor reads as
		 * not-done once it is recycled.
		 */
		cur->wb.status_error0 = 0;
		eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));

		/* Record this fragment (free list 0) for iflib to assemble */
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == rxq->desc_count)
			cidx = 0;
		i++;
	} while (!eop);

	/* End of Packet reached; cur is eop/last descriptor */

	/* Make sure packets with bad L2 values are discarded.
	 * This bit is only valid in the last descriptor.
	 */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
		rxq->stats.desc_errs++;
		return (EBADMSG);
	}

	/* Get VLAN tag information if one is in descriptor */
	if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
		ri->iri_vtag = le16toh(cur->wb.l2tag1);
		ri->iri_flags |= M_VLANTAG;
	}

	/* Capture soft statistics for this Rx queue */
	rxq->stats.rx_packets++;
	rxq->stats.rx_bytes += ri->iri_len;

	/* Get packet type and set checksum flags */
	ptype = le16toh(cur->wb.ptype_flex_flags0) &
		ICE_RX_FLEX_DESC_PTYPE_M;
	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		ice_rx_checksum(rxq, &ri->iri_csum_flags,
				&ri->iri_csum_data, status0, ptype);

	/* Set remaining iflib RX descriptor info fields */
	ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
	ri->iri_rsstype = ice_ptype_to_hash(ptype);
	ri->iri_nfrags = i;
	return (0);
}

/**
 * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @arg: device specific softc structure
 * @iru: the Rx descriptor update structure
 *
 * Update the Rx descriptor indices for a given queue, assigning new physical
 * addresses to the descriptors, preparing them for re-use by the hardware.
 */
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq;
	uint32_t next_pidx;
	int i;
	uint64_t *paddrs;
	uint32_t pidx;
	uint16_t qsidx, count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	qsidx = iru->iru_qsidx;
	count = iru->iru_count;

	rxq = &(sc->pf_vsi.rx_queues[qsidx]);

	/* Write fresh buffer physical addresses into `count' descriptors
	 * starting at pidx, wrapping at the end of the ring.
	 */
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == (uint32_t)rxq->desc_count)
			next_pidx = 0;
	}
}

/**
 * ice_ift_rxd_flush - Flush Rx descriptors to hardware
 * @arg: device specific softc pointer
 * @rxqid: the Rx queue to flush
 * @flidx:
unused parameter
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
 * software is done with the descriptor and it can be recycled.
 */
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
		  qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
	struct ice_hw *hw = &sc->hw;

	wr32(hw, rxq->tail, pidx);
}

/**
 * ice_ift_queue_select - Select a Tx queue for a packet
 * @arg: device specific softc pointer
 * @m: the mbuf being transmitted
 * @pi: packet info (used for DSCP-based TC lookup)
 *
 * Picks a Tx queue index for the packet. Without multiple TCs configured
 * this matches the default iflib flowid hashing. With multiple TCs, the
 * traffic class is derived from DSCP or VLAN priority and the flowid is
 * hashed within that TC's queue range.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_vsi *vsi = &sc->pf_vsi;
	u16 tc_base_queue, tc_qcount;
	u8 up, tc;

#ifdef ALTQ
	/* Included to match default iflib behavior */
	/* Only go out on default queue if ALTQ is enabled */
	struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx);
	if (if_altq_is_enabled(ifp))
		return (0);
#endif

	/* Single-TC case: behave exactly like iflib's default selection */
	if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
		if (M_HASHTYPE_GET(m)) {
			/* Default iflib queue selection method */
			return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
		} else
			return (0);
	}

	/* Use default TC unless overridden later */
	tc = 0; /* XXX: Get default TC for traffic if >1 TC? */

	local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;

#if defined(INET) || defined(INET6)
	/* In DSCP PFC mode, map the IP TOS field's DSCP bits to a TC */
	if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
	    (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
		u8 dscp_val = pi->ipi_ip_tos >> 2;
		tc = local_dcbx_cfg->dscp_map[dscp_val];
	} else
#endif /* defined(INET) || defined(INET6) */
	/* Otherwise map the VLAN user priority to a TC, if tagged */
	if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
		up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
		tc = local_dcbx_cfg->etscfg.prio_table[up];
	}

	/* Hash the flowid into the selected TC's contiguous queue range */
	tc_base_queue = vsi->tc_info[tc].qoffset;
	tc_qcount = vsi->tc_info[tc].qcount_tx;

	if (M_HASHTYPE_GET(m))
		return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
	else
		return (tc_base_queue);
}