1e948693eSPhilip Paeps /*- 2718cf2ccSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3718cf2ccSPedro F. Giffuni * 4929c7febSAndrew Rybchenko * Copyright (c) 2010-2016 Solarflare Communications Inc. 5e948693eSPhilip Paeps * All rights reserved. 6e948693eSPhilip Paeps * 7e948693eSPhilip Paeps * This software was developed in part by Philip Paeps under contract for 8e948693eSPhilip Paeps * Solarflare Communications, Inc. 9e948693eSPhilip Paeps * 10e948693eSPhilip Paeps * Redistribution and use in source and binary forms, with or without 113c838a9fSAndrew Rybchenko * modification, are permitted provided that the following conditions are met: 12e948693eSPhilip Paeps * 133c838a9fSAndrew Rybchenko * 1. Redistributions of source code must retain the above copyright notice, 143c838a9fSAndrew Rybchenko * this list of conditions and the following disclaimer. 153c838a9fSAndrew Rybchenko * 2. Redistributions in binary form must reproduce the above copyright notice, 163c838a9fSAndrew Rybchenko * this list of conditions and the following disclaimer in the documentation 173c838a9fSAndrew Rybchenko * and/or other materials provided with the distribution. 183c838a9fSAndrew Rybchenko * 193c838a9fSAndrew Rybchenko * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 203c838a9fSAndrew Rybchenko * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 213c838a9fSAndrew Rybchenko * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 223c838a9fSAndrew Rybchenko * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 233c838a9fSAndrew Rybchenko * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 243c838a9fSAndrew Rybchenko * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 253c838a9fSAndrew Rybchenko * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 263c838a9fSAndrew Rybchenko * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 273c838a9fSAndrew Rybchenko * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 283c838a9fSAndrew Rybchenko * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 293c838a9fSAndrew Rybchenko * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 303c838a9fSAndrew Rybchenko * 313c838a9fSAndrew Rybchenko * The views and conclusions contained in the software and documentation are 323c838a9fSAndrew Rybchenko * those of the authors and should not be interpreted as representing official 333c838a9fSAndrew Rybchenko * policies, either expressed or implied, of the FreeBSD Project. 34e948693eSPhilip Paeps */ 35e948693eSPhilip Paeps 36cf07c70dSGeorge V. Neville-Neil /* Theory of operation: 37cf07c70dSGeorge V. Neville-Neil * 388b447157SAndrew Rybchenko * Tx queues allocation and mapping on Siena 39cf07c70dSGeorge V. Neville-Neil * 40cf07c70dSGeorge V. Neville-Neil * One Tx queue with enabled checksum offload is allocated per Rx channel 41cf07c70dSGeorge V. Neville-Neil * (event queue). Also 2 Tx queues (one without checksum offload and one 42cf07c70dSGeorge V. Neville-Neil * with IP checksum offload only) are allocated and bound to event queue 0. 43cf07c70dSGeorge V. Neville-Neil * sfxge_txq_type is used as Tx queue label. 44cf07c70dSGeorge V. Neville-Neil * 45cf07c70dSGeorge V. Neville-Neil * So, event queue plus label mapping to Tx queue index is: 46cf07c70dSGeorge V. Neville-Neil * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES) 47cf07c70dSGeorge V. Neville-Neil * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1 48cf07c70dSGeorge V. Neville-Neil * See sfxge_get_txq_by_label() sfxge_ev.c 498b447157SAndrew Rybchenko * 508b447157SAndrew Rybchenko * Tx queue allocation and mapping on EF10 518b447157SAndrew Rybchenko * 528b447157SAndrew Rybchenko * One Tx queue with enabled checksum offload is allocated per Rx 538b447157SAndrew Rybchenko * channel (event queue). Checksum offload on all Tx queues is enabled or 548b447157SAndrew Rybchenko * disabled dynamically by inserting option descriptors, so the additional 558b447157SAndrew Rybchenko * queues used on Siena are not required. 568b447157SAndrew Rybchenko * 578b447157SAndrew Rybchenko * TxQ label is always set to zero on EF10 hardware. 588b447157SAndrew Rybchenko * So, event queue to Tx queue mapping is simple: 598b447157SAndrew Rybchenko * TxQ-index = EvQ-index 60cf07c70dSGeorge V. Neville-Neil */ 61cf07c70dSGeorge V. Neville-Neil 62e948693eSPhilip Paeps #include <sys/cdefs.h> 63e948693eSPhilip Paeps __FBSDID("$FreeBSD$"); 64e948693eSPhilip Paeps 653bbc1e08SAndrew Rybchenko #include "opt_rss.h" 663bbc1e08SAndrew Rybchenko 678ec07310SGleb Smirnoff #include <sys/param.h> 688ec07310SGleb Smirnoff #include <sys/malloc.h> 69e948693eSPhilip Paeps #include <sys/mbuf.h> 70e948693eSPhilip Paeps #include <sys/smp.h> 71e948693eSPhilip Paeps #include <sys/socket.h> 72e948693eSPhilip Paeps #include <sys/sysctl.h> 73060a95efSGeorge V. Neville-Neil #include <sys/syslog.h> 74a45a0da1SAndrew Rybchenko #include <sys/limits.h> 75e948693eSPhilip Paeps 76e948693eSPhilip Paeps #include <net/bpf.h> 77e948693eSPhilip Paeps #include <net/ethernet.h> 78e948693eSPhilip Paeps #include <net/if.h> 79e948693eSPhilip Paeps #include <net/if_vlan_var.h> 80e948693eSPhilip Paeps 81e948693eSPhilip Paeps #include <netinet/in.h> 82e948693eSPhilip Paeps #include <netinet/ip.h> 83e948693eSPhilip Paeps #include <netinet/ip6.h> 84e948693eSPhilip Paeps #include <netinet/tcp.h> 85e948693eSPhilip Paeps 863bbc1e08SAndrew Rybchenko #ifdef RSS 873bbc1e08SAndrew Rybchenko #include <net/rss_config.h> 883bbc1e08SAndrew Rybchenko #endif 893bbc1e08SAndrew Rybchenko 90e948693eSPhilip Paeps #include "common/efx.h" 91e948693eSPhilip Paeps 92e948693eSPhilip Paeps #include "sfxge.h" 93e948693eSPhilip Paeps #include "sfxge_tx.h" 94e948693eSPhilip Paeps 95060a95efSGeorge V. Neville-Neil 96060a95efSGeorge V. Neville-Neil #define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max) 97060a95efSGeorge V. Neville-Neil static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT; 98060a95efSGeorge V. Neville-Neil TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max); 99060a95efSGeorge V. Neville-Neil SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN, 100060a95efSGeorge V. Neville-Neil &sfxge_tx_dpl_get_max, 0, 10193929f25SAndrew Rybchenko "Maximum number of any packets in deferred packet get-list"); 10293929f25SAndrew Rybchenko 10393929f25SAndrew Rybchenko #define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \ 10493929f25SAndrew Rybchenko SFXGE_PARAM(tx_dpl_get_non_tcp_max) 10593929f25SAndrew Rybchenko static int sfxge_tx_dpl_get_non_tcp_max = 10693929f25SAndrew Rybchenko SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT; 10793929f25SAndrew Rybchenko TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max); 10893929f25SAndrew Rybchenko SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN, 10993929f25SAndrew Rybchenko &sfxge_tx_dpl_get_non_tcp_max, 0, 11093929f25SAndrew Rybchenko "Maximum number of non-TCP packets in deferred packet get-list"); 111060a95efSGeorge V. Neville-Neil 112060a95efSGeorge V. Neville-Neil #define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max) 113060a95efSGeorge V. Neville-Neil static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT; 114060a95efSGeorge V. Neville-Neil TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max); 115060a95efSGeorge V. Neville-Neil SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN, 116060a95efSGeorge V. Neville-Neil &sfxge_tx_dpl_put_max, 0, 11793929f25SAndrew Rybchenko "Maximum number of any packets in deferred packet put-list"); 118060a95efSGeorge V. Neville-Neil 1193c838a9fSAndrew Rybchenko #define SFXGE_PARAM_TSO_FW_ASSISTED SFXGE_PARAM(tso_fw_assisted) 120a45a0da1SAndrew Rybchenko static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2); 1213c838a9fSAndrew Rybchenko TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted); 1223c838a9fSAndrew Rybchenko SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN, 1233c838a9fSAndrew Rybchenko &sfxge_tso_fw_assisted, 0, 124a45a0da1SAndrew Rybchenko "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware"); 1253c838a9fSAndrew Rybchenko 126060a95efSGeorge V. Neville-Neil 127f6222d7bSAndrew Rybchenko static const struct { 128f6222d7bSAndrew Rybchenko const char *name; 129f6222d7bSAndrew Rybchenko size_t offset; 130f6222d7bSAndrew Rybchenko } sfxge_tx_stats[] = { 131f6222d7bSAndrew Rybchenko #define SFXGE_TX_STAT(name, member) \ 132f6222d7bSAndrew Rybchenko { #name, offsetof(struct sfxge_txq, member) } 133f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tso_bursts, tso_bursts), 134f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tso_packets, tso_packets), 135f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tso_long_headers, tso_long_headers), 136f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many), 137f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc), 138f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_collapses, collapses), 139f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_drops, drops), 140f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_get_overflow, get_overflow), 141f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow), 142f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_put_overflow, put_overflow), 143f6222d7bSAndrew Rybchenko SFXGE_TX_STAT(tx_netdown_drops, netdown_drops), 144f6222d7bSAndrew Rybchenko }; 145f6222d7bSAndrew Rybchenko 146f6222d7bSAndrew Rybchenko 147e948693eSPhilip Paeps /* Forward declarations. */ 1480b28bbdcSAndrew Rybchenko static void sfxge_tx_qdpl_service(struct sfxge_txq *txq); 149e948693eSPhilip Paeps static void sfxge_tx_qlist_post(struct sfxge_txq *txq); 150e948693eSPhilip Paeps static void sfxge_tx_qunblock(struct sfxge_txq *txq); 151e948693eSPhilip Paeps static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 1523c838a9fSAndrew Rybchenko const bus_dma_segment_t *dma_seg, int n_dma_seg, 1538b447157SAndrew Rybchenko int n_extra_descs); 1543c838a9fSAndrew Rybchenko 1553c838a9fSAndrew Rybchenko static inline void 1563c838a9fSAndrew Rybchenko sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp) 1573c838a9fSAndrew Rybchenko { 1583c838a9fSAndrew Rybchenko KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); 1593c838a9fSAndrew Rybchenko if (__predict_false(*pstmp == 1603c838a9fSAndrew Rybchenko &txq->stmp[txq->ptr_mask])) 1613c838a9fSAndrew Rybchenko *pstmp = &txq->stmp[0]; 1623c838a9fSAndrew Rybchenko else 1633c838a9fSAndrew Rybchenko (*pstmp)++; 1643c838a9fSAndrew Rybchenko } 1653c838a9fSAndrew Rybchenko 1668b447157SAndrew Rybchenko static int 1678b447157SAndrew Rybchenko sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf, 1688b447157SAndrew Rybchenko struct sfxge_tx_mapping **pstmp) 1698b447157SAndrew Rybchenko { 1708b447157SAndrew Rybchenko uint16_t new_hw_cksum_flags; 1718b447157SAndrew Rybchenko efx_desc_t *desc; 1728b447157SAndrew Rybchenko 1738b447157SAndrew Rybchenko if (mbuf->m_pkthdr.csum_flags & 1748b447157SAndrew Rybchenko (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) { 1758b447157SAndrew Rybchenko /* 1768b447157SAndrew Rybchenko * We always set EFX_TXQ_CKSUM_IPV4 here because this 1778b447157SAndrew Rybchenko * configuration is the most useful, and this won't 1788b447157SAndrew Rybchenko * cause any trouble in case of IPv6 traffic anyway. 1798b447157SAndrew Rybchenko */ 1808b447157SAndrew Rybchenko new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 1818b447157SAndrew Rybchenko } else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 1828b447157SAndrew Rybchenko new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4; 1838b447157SAndrew Rybchenko } else { 1848b447157SAndrew Rybchenko new_hw_cksum_flags = 0; 1858b447157SAndrew Rybchenko } 1868b447157SAndrew Rybchenko 1878b447157SAndrew Rybchenko if (new_hw_cksum_flags == txq->hw_cksum_flags) 1888b447157SAndrew Rybchenko return (0); 1898b447157SAndrew Rybchenko 1908b447157SAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc]; 1918b447157SAndrew Rybchenko efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc); 1928b447157SAndrew Rybchenko txq->hw_cksum_flags = new_hw_cksum_flags; 1938b447157SAndrew Rybchenko txq->n_pend_desc++; 1948b447157SAndrew Rybchenko 1958b447157SAndrew Rybchenko sfxge_next_stmp(txq, pstmp); 1968b447157SAndrew Rybchenko 1978b447157SAndrew Rybchenko return (1); 1988b447157SAndrew Rybchenko } 1998b447157SAndrew Rybchenko 2008b447157SAndrew Rybchenko static int 2018b447157SAndrew Rybchenko sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf, 2028b447157SAndrew Rybchenko struct sfxge_tx_mapping **pstmp) 2038b447157SAndrew Rybchenko { 2048b447157SAndrew Rybchenko uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? 2058b447157SAndrew Rybchenko mbuf->m_pkthdr.ether_vtag : 2068b447157SAndrew Rybchenko 0); 2078b447157SAndrew Rybchenko efx_desc_t *desc; 2088b447157SAndrew Rybchenko 2098b447157SAndrew Rybchenko if (this_tag == txq->hw_vlan_tci) 2108b447157SAndrew Rybchenko return (0); 2118b447157SAndrew Rybchenko 2128b447157SAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc]; 2138b447157SAndrew Rybchenko efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc); 2148b447157SAndrew Rybchenko txq->hw_vlan_tci = this_tag; 2158b447157SAndrew Rybchenko txq->n_pend_desc++; 2168b447157SAndrew Rybchenko 2178b447157SAndrew Rybchenko sfxge_next_stmp(txq, pstmp); 2188b447157SAndrew Rybchenko 2198b447157SAndrew Rybchenko return (1); 2208b447157SAndrew Rybchenko } 221e948693eSPhilip Paeps 222e948693eSPhilip Paeps void 223cc933626SAndrew Rybchenko sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) 224e948693eSPhilip Paeps { 225e948693eSPhilip Paeps unsigned int completed; 226e948693eSPhilip Paeps 227763cab71SAndrew Rybchenko SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 228e948693eSPhilip Paeps 229e948693eSPhilip Paeps completed = txq->completed; 230e948693eSPhilip Paeps while (completed != txq->pending) { 231e948693eSPhilip Paeps struct sfxge_tx_mapping *stmp; 232e948693eSPhilip Paeps unsigned int id; 233e948693eSPhilip Paeps 234385b1d8eSGeorge V. Neville-Neil id = completed++ & txq->ptr_mask; 235e948693eSPhilip Paeps 236e948693eSPhilip Paeps stmp = &txq->stmp[id]; 237e948693eSPhilip Paeps if (stmp->flags & TX_BUF_UNMAP) { 238e948693eSPhilip Paeps bus_dmamap_unload(txq->packet_dma_tag, stmp->map); 239e948693eSPhilip Paeps if (stmp->flags & TX_BUF_MBUF) { 240e948693eSPhilip Paeps struct mbuf *m = stmp->u.mbuf; 241e948693eSPhilip Paeps do 242e948693eSPhilip Paeps m = m_free(m); 243e948693eSPhilip Paeps while (m != NULL); 244e948693eSPhilip Paeps } else { 245e948693eSPhilip Paeps free(stmp->u.heap_buf, M_SFXGE); 246e948693eSPhilip Paeps } 247e948693eSPhilip Paeps stmp->flags = 0; 248e948693eSPhilip Paeps } 249e948693eSPhilip Paeps } 250e948693eSPhilip Paeps txq->completed = completed; 251e948693eSPhilip Paeps 252e948693eSPhilip Paeps /* Check whether we need to unblock the queue. */ 253e948693eSPhilip Paeps mb(); 254e948693eSPhilip Paeps if (txq->blocked) { 255e948693eSPhilip Paeps unsigned int level; 256e948693eSPhilip Paeps 257e948693eSPhilip Paeps level = txq->added - txq->completed; 258385b1d8eSGeorge V. Neville-Neil if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) 259e948693eSPhilip Paeps sfxge_tx_qunblock(txq); 260e948693eSPhilip Paeps } 261e948693eSPhilip Paeps } 262e948693eSPhilip Paeps 2630b28bbdcSAndrew Rybchenko static unsigned int 26493929f25SAndrew Rybchenko sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) 26593929f25SAndrew Rybchenko { 266453130d9SPedro F. Giffuni /* Absence of TCP checksum flags does not mean that it is non-TCP 26793929f25SAndrew Rybchenko * but it should be true if user wants to achieve high throughput. 26893929f25SAndrew Rybchenko */ 26993929f25SAndrew Rybchenko return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); 27093929f25SAndrew Rybchenko } 27193929f25SAndrew Rybchenko 272e948693eSPhilip Paeps /* 273e948693eSPhilip Paeps * Reorder the put list and append it to the get list. 274e948693eSPhilip Paeps */ 275e948693eSPhilip Paeps static void 276e948693eSPhilip Paeps sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) 277e948693eSPhilip Paeps { 278e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp; 279e948693eSPhilip Paeps struct mbuf *mbuf, *get_next, **get_tailp; 280e948693eSPhilip Paeps volatile uintptr_t *putp; 281e948693eSPhilip Paeps uintptr_t put; 282e948693eSPhilip Paeps unsigned int count; 28393929f25SAndrew Rybchenko unsigned int non_tcp_count; 284e948693eSPhilip Paeps 285763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 286e948693eSPhilip Paeps 287e948693eSPhilip Paeps stdp = &txq->dpl; 288e948693eSPhilip Paeps 289e948693eSPhilip Paeps /* Acquire the put list. */ 290e948693eSPhilip Paeps putp = &stdp->std_put; 291fb8ccc78SMarius Strobl put = atomic_readandclear_ptr(putp); 292e948693eSPhilip Paeps mbuf = (void *)put; 293e948693eSPhilip Paeps 294e948693eSPhilip Paeps if (mbuf == NULL) 295e948693eSPhilip Paeps return; 296e948693eSPhilip Paeps 297e948693eSPhilip Paeps /* Reverse the put list. */ 298e948693eSPhilip Paeps get_tailp = &mbuf->m_nextpkt; 299e948693eSPhilip Paeps get_next = NULL; 300e948693eSPhilip Paeps 301e948693eSPhilip Paeps count = 0; 30293929f25SAndrew Rybchenko non_tcp_count = 0; 303e948693eSPhilip Paeps do { 304e948693eSPhilip Paeps struct mbuf *put_next; 305e948693eSPhilip Paeps 30693929f25SAndrew Rybchenko non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); 307e948693eSPhilip Paeps put_next = mbuf->m_nextpkt; 308e948693eSPhilip Paeps mbuf->m_nextpkt = get_next; 309e948693eSPhilip Paeps get_next = mbuf; 310e948693eSPhilip Paeps mbuf = put_next; 311e948693eSPhilip Paeps 312e948693eSPhilip Paeps count++; 313e948693eSPhilip Paeps } while (mbuf != NULL); 314e948693eSPhilip Paeps 315bce6d281SAndrew Rybchenko if (count > stdp->std_put_hiwat) 316bce6d281SAndrew Rybchenko stdp->std_put_hiwat = count; 317bce6d281SAndrew Rybchenko 318e948693eSPhilip Paeps /* Append the reversed put list to the get list. */ 319e948693eSPhilip Paeps KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); 320e948693eSPhilip Paeps *stdp->std_getp = get_next; 321e948693eSPhilip Paeps stdp->std_getp = get_tailp; 322bc85c897SGeorge V. Neville-Neil stdp->std_get_count += count; 32393929f25SAndrew Rybchenko stdp->std_get_non_tcp_count += non_tcp_count; 324e948693eSPhilip Paeps } 325e948693eSPhilip Paeps 326e948693eSPhilip Paeps static void 327e948693eSPhilip Paeps sfxge_tx_qreap(struct sfxge_txq *txq) 328e948693eSPhilip Paeps { 329763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 330e948693eSPhilip Paeps 331e948693eSPhilip Paeps txq->reaped = txq->completed; 332e948693eSPhilip Paeps } 333e948693eSPhilip Paeps 334e948693eSPhilip Paeps static void 335e948693eSPhilip Paeps sfxge_tx_qlist_post(struct sfxge_txq *txq) 336e948693eSPhilip Paeps { 337e948693eSPhilip Paeps unsigned int old_added; 3383c838a9fSAndrew Rybchenko unsigned int block_level; 339e948693eSPhilip Paeps unsigned int level; 340e948693eSPhilip Paeps int rc; 341e948693eSPhilip Paeps 342763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 343e948693eSPhilip Paeps 344e948693eSPhilip Paeps KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); 3453c838a9fSAndrew Rybchenko KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, 346e948693eSPhilip Paeps ("txq->n_pend_desc too large")); 347e948693eSPhilip Paeps KASSERT(!txq->blocked, ("txq->blocked")); 348e948693eSPhilip Paeps 349e948693eSPhilip Paeps old_added = txq->added; 350e948693eSPhilip Paeps 351e948693eSPhilip Paeps /* Post the fragment list. */ 3523c838a9fSAndrew Rybchenko rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, 353e948693eSPhilip Paeps txq->reaped, &txq->added); 3543c838a9fSAndrew Rybchenko KASSERT(rc == 0, ("efx_tx_qdesc_post() failed")); 355e948693eSPhilip Paeps 3563c838a9fSAndrew Rybchenko /* If efx_tx_qdesc_post() had to refragment, our information about 357e948693eSPhilip Paeps * buffers to free may be associated with the wrong 358e948693eSPhilip Paeps * descriptors. 359e948693eSPhilip Paeps */ 360e948693eSPhilip Paeps KASSERT(txq->added - old_added == txq->n_pend_desc, 3613c838a9fSAndrew Rybchenko ("efx_tx_qdesc_post() refragmented descriptors")); 362e948693eSPhilip Paeps 363e948693eSPhilip Paeps level = txq->added - txq->reaped; 364385b1d8eSGeorge V. Neville-Neil KASSERT(level <= txq->entries, ("overfilled TX queue")); 365e948693eSPhilip Paeps 366e948693eSPhilip Paeps /* Clear the fragment list. */ 367e948693eSPhilip Paeps txq->n_pend_desc = 0; 368e948693eSPhilip Paeps 3693c838a9fSAndrew Rybchenko /* 3703c838a9fSAndrew Rybchenko * Set the block level to ensure there is space to generate a 3713c838a9fSAndrew Rybchenko * large number of descriptors for TSO. 3723c838a9fSAndrew Rybchenko */ 3733c838a9fSAndrew Rybchenko block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; 3743c838a9fSAndrew Rybchenko 375e948693eSPhilip Paeps /* Have we reached the block level? */ 3763c838a9fSAndrew Rybchenko if (level < block_level) 377e948693eSPhilip Paeps return; 378e948693eSPhilip Paeps 379e948693eSPhilip Paeps /* Reap, and check again */ 380e948693eSPhilip Paeps sfxge_tx_qreap(txq); 381e948693eSPhilip Paeps level = txq->added - txq->reaped; 3823c838a9fSAndrew Rybchenko if (level < block_level) 383e948693eSPhilip Paeps return; 384e948693eSPhilip Paeps 385e948693eSPhilip Paeps txq->blocked = 1; 386e948693eSPhilip Paeps 387e948693eSPhilip Paeps /* 388e948693eSPhilip Paeps * Avoid a race with completion interrupt handling that could leave 389e948693eSPhilip Paeps * the queue blocked. 390e948693eSPhilip Paeps */ 391e948693eSPhilip Paeps mb(); 392e948693eSPhilip Paeps sfxge_tx_qreap(txq); 393e948693eSPhilip Paeps level = txq->added - txq->reaped; 3943c838a9fSAndrew Rybchenko if (level < block_level) { 395e948693eSPhilip Paeps mb(); 396e948693eSPhilip Paeps txq->blocked = 0; 397e948693eSPhilip Paeps } 398e948693eSPhilip Paeps } 399e948693eSPhilip Paeps 400e948693eSPhilip Paeps static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) 401e948693eSPhilip Paeps { 402e948693eSPhilip Paeps bus_dmamap_t *used_map; 403e948693eSPhilip Paeps bus_dmamap_t map; 404e948693eSPhilip Paeps bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; 405e948693eSPhilip Paeps unsigned int id; 406e948693eSPhilip Paeps struct sfxge_tx_mapping *stmp; 4073c838a9fSAndrew Rybchenko efx_desc_t *desc; 408e948693eSPhilip Paeps int n_dma_seg; 409e948693eSPhilip Paeps int rc; 410e948693eSPhilip Paeps int i; 4113c838a9fSAndrew Rybchenko int eop; 4128b447157SAndrew Rybchenko uint16_t hw_cksum_flags_prev; 413ad72d030SAndrew Rybchenko uint16_t hw_vlan_tci_prev; 4148b447157SAndrew Rybchenko int n_extra_descs; 415e948693eSPhilip Paeps 416e948693eSPhilip Paeps KASSERT(!txq->blocked, ("txq->blocked")); 417e948693eSPhilip Paeps 418e3ef7bb2SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY 419e3ef7bb2SAndrew Rybchenko /* 420e3ef7bb2SAndrew Rybchenko * If software TSO is used, we still need to copy packet header, 421e3ef7bb2SAndrew Rybchenko * even if we have already parsed it early before enqueue. 422e3ef7bb2SAndrew Rybchenko */ 423e3ef7bb2SAndrew Rybchenko if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) && 424e3ef7bb2SAndrew Rybchenko (txq->tso_fw_assisted == 0)) 425e3ef7bb2SAndrew Rybchenko prefetch_read_many(mbuf->m_data); 426e3ef7bb2SAndrew Rybchenko #else 427e3ef7bb2SAndrew Rybchenko /* 428e3ef7bb2SAndrew Rybchenko * Prefetch packet header since we need to parse it and extract 429e3ef7bb2SAndrew Rybchenko * IP ID, TCP sequence number and flags. 430e3ef7bb2SAndrew Rybchenko */ 431e948693eSPhilip Paeps if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) 432e948693eSPhilip Paeps prefetch_read_many(mbuf->m_data); 433e3ef7bb2SAndrew Rybchenko #endif 434e948693eSPhilip Paeps 435851128b8SAndrew Rybchenko if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { 436e948693eSPhilip Paeps rc = EINTR; 437e948693eSPhilip Paeps goto reject; 438e948693eSPhilip Paeps } 439e948693eSPhilip Paeps 440e948693eSPhilip Paeps /* Load the packet for DMA. */ 441385b1d8eSGeorge V. Neville-Neil id = txq->added & txq->ptr_mask; 442e948693eSPhilip Paeps stmp = &txq->stmp[id]; 443e948693eSPhilip Paeps rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, 444e948693eSPhilip Paeps mbuf, dma_seg, &n_dma_seg, 0); 445e948693eSPhilip Paeps if (rc == EFBIG) { 446e948693eSPhilip Paeps /* Try again. */ 447c6499eccSGleb Smirnoff struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, 448e948693eSPhilip Paeps SFXGE_TX_MAPPING_MAX_SEG); 449e948693eSPhilip Paeps if (new_mbuf == NULL) 450e948693eSPhilip Paeps goto reject; 451e948693eSPhilip Paeps ++txq->collapses; 452e948693eSPhilip Paeps mbuf = new_mbuf; 453e948693eSPhilip Paeps rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, 454e948693eSPhilip Paeps stmp->map, mbuf, 455e948693eSPhilip Paeps dma_seg, &n_dma_seg, 0); 456e948693eSPhilip Paeps } 457e948693eSPhilip Paeps if (rc != 0) 458e948693eSPhilip Paeps goto reject; 459e948693eSPhilip Paeps 460e948693eSPhilip Paeps /* Make the packet visible to the hardware. */ 461e948693eSPhilip Paeps bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); 462e948693eSPhilip Paeps 463e948693eSPhilip Paeps used_map = &stmp->map; 464e948693eSPhilip Paeps 4658b447157SAndrew Rybchenko hw_cksum_flags_prev = txq->hw_cksum_flags; 466ad72d030SAndrew Rybchenko hw_vlan_tci_prev = txq->hw_vlan_tci; 467ad72d030SAndrew Rybchenko 4688b447157SAndrew Rybchenko /* 4698b447157SAndrew Rybchenko * The order of option descriptors, which are used to leverage VLAN tag 4708b447157SAndrew Rybchenko * and checksum offloads, might be important. Changing checksum offload 4718b447157SAndrew Rybchenko * between VLAN option and packet descriptors probably does not work. 4728b447157SAndrew Rybchenko */ 4738b447157SAndrew Rybchenko n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp); 4748b447157SAndrew Rybchenko n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp); 4758b447157SAndrew Rybchenko 476e948693eSPhilip Paeps if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { 4778b447157SAndrew Rybchenko rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, 4788b447157SAndrew Rybchenko n_extra_descs); 479e948693eSPhilip Paeps if (rc < 0) 480e948693eSPhilip Paeps goto reject_mapped; 4813c838a9fSAndrew Rybchenko stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; 482e948693eSPhilip Paeps } else { 483e948693eSPhilip Paeps /* Add the mapping to the fragment list, and set flags 484e948693eSPhilip Paeps * for the buffer. 485e948693eSPhilip Paeps */ 4863c838a9fSAndrew Rybchenko 487e948693eSPhilip Paeps i = 0; 488e948693eSPhilip Paeps for (;;) { 4898b447157SAndrew Rybchenko desc = &txq->pend_desc[i + n_extra_descs]; 4903c838a9fSAndrew Rybchenko eop = (i == n_dma_seg - 1); 4913c838a9fSAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, 4923c838a9fSAndrew Rybchenko dma_seg[i].ds_addr, 4933c838a9fSAndrew Rybchenko dma_seg[i].ds_len, 4943c838a9fSAndrew Rybchenko eop, 4953c838a9fSAndrew Rybchenko desc); 4963c838a9fSAndrew Rybchenko if (eop) 497e948693eSPhilip Paeps break; 498e948693eSPhilip Paeps i++; 4993c838a9fSAndrew Rybchenko sfxge_next_stmp(txq, &stmp); 500e948693eSPhilip Paeps } 5018b447157SAndrew Rybchenko txq->n_pend_desc = n_dma_seg + n_extra_descs; 502e948693eSPhilip Paeps } 503e948693eSPhilip Paeps 504e948693eSPhilip Paeps /* 505e948693eSPhilip Paeps * If the mapping required more than one descriptor 506e948693eSPhilip Paeps * then we need to associate the DMA map with the last 507e948693eSPhilip Paeps * descriptor, not the first. 508e948693eSPhilip Paeps */ 509e948693eSPhilip Paeps if (used_map != &stmp->map) { 510e948693eSPhilip Paeps map = stmp->map; 511e948693eSPhilip Paeps stmp->map = *used_map; 512e948693eSPhilip Paeps *used_map = map; 513e948693eSPhilip Paeps } 514e948693eSPhilip Paeps 515e948693eSPhilip Paeps stmp->u.mbuf = mbuf; 516e948693eSPhilip Paeps stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; 517e948693eSPhilip Paeps 518e948693eSPhilip Paeps /* Post the fragment list. */ 519e948693eSPhilip Paeps sfxge_tx_qlist_post(txq); 520e948693eSPhilip Paeps 521b7b0edd1SGeorge V. Neville-Neil return (0); 522e948693eSPhilip Paeps 523e948693eSPhilip Paeps reject_mapped: 524ad72d030SAndrew Rybchenko txq->hw_vlan_tci = hw_vlan_tci_prev; 5258b447157SAndrew Rybchenko txq->hw_cksum_flags = hw_cksum_flags_prev; 526e948693eSPhilip Paeps bus_dmamap_unload(txq->packet_dma_tag, *used_map); 527e948693eSPhilip Paeps reject: 528e948693eSPhilip Paeps /* Drop the packet on the floor. */ 529e948693eSPhilip Paeps m_freem(mbuf); 530e948693eSPhilip Paeps ++txq->drops; 531e948693eSPhilip Paeps 532b7b0edd1SGeorge V. Neville-Neil return (rc); 533e948693eSPhilip Paeps } 534e948693eSPhilip Paeps 535e948693eSPhilip Paeps /* 536e948693eSPhilip Paeps * Drain the deferred packet list into the transmit queue. 537e948693eSPhilip Paeps */ 538e948693eSPhilip Paeps static void 539e948693eSPhilip Paeps sfxge_tx_qdpl_drain(struct sfxge_txq *txq) 540e948693eSPhilip Paeps { 541e948693eSPhilip Paeps struct sfxge_softc *sc; 542e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp; 543e948693eSPhilip Paeps struct mbuf *mbuf, *next; 544e948693eSPhilip Paeps unsigned int count; 54593929f25SAndrew Rybchenko unsigned int non_tcp_count; 546e948693eSPhilip Paeps unsigned int pushed; 547e948693eSPhilip Paeps int rc; 548e948693eSPhilip Paeps 549763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 550e948693eSPhilip Paeps 551e948693eSPhilip Paeps sc = txq->sc; 552e948693eSPhilip Paeps stdp = &txq->dpl; 553e948693eSPhilip Paeps pushed = txq->added; 554e948693eSPhilip Paeps 55506824d2cSAndrew Rybchenko if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { 556e948693eSPhilip Paeps prefetch_read_many(sc->enp); 557e948693eSPhilip Paeps prefetch_read_many(txq->common); 55806824d2cSAndrew Rybchenko } 559e948693eSPhilip Paeps 560e948693eSPhilip Paeps mbuf = stdp->std_get; 561bc85c897SGeorge V. Neville-Neil count = stdp->std_get_count; 56293929f25SAndrew Rybchenko non_tcp_count = stdp->std_get_non_tcp_count; 56393929f25SAndrew Rybchenko 56493929f25SAndrew Rybchenko if (count > stdp->std_get_hiwat) 56593929f25SAndrew Rybchenko stdp->std_get_hiwat = count; 566e948693eSPhilip Paeps 567e948693eSPhilip Paeps while (count != 0) { 568e948693eSPhilip Paeps KASSERT(mbuf != NULL, ("mbuf == NULL")); 569e948693eSPhilip Paeps 570e948693eSPhilip Paeps next = mbuf->m_nextpkt; 571e948693eSPhilip Paeps mbuf->m_nextpkt = NULL; 572e948693eSPhilip Paeps 573e948693eSPhilip Paeps ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ 574e948693eSPhilip Paeps 575e948693eSPhilip Paeps if (next != NULL) 576e948693eSPhilip Paeps prefetch_read_many(next); 577e948693eSPhilip Paeps 578e948693eSPhilip Paeps rc = sfxge_tx_queue_mbuf(txq, mbuf); 579e948693eSPhilip Paeps --count; 58093929f25SAndrew Rybchenko non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); 581e948693eSPhilip Paeps mbuf = next; 582e948693eSPhilip Paeps if (rc != 0) 583e948693eSPhilip Paeps continue; 584e948693eSPhilip Paeps 585e948693eSPhilip Paeps if (txq->blocked) 586e948693eSPhilip Paeps break; 587e948693eSPhilip Paeps 588e948693eSPhilip Paeps /* Push the fragments to the hardware in batches. */ 589e948693eSPhilip Paeps if (txq->added - pushed >= SFXGE_TX_BATCH) { 5903c838a9fSAndrew Rybchenko efx_tx_qpush(txq->common, txq->added, pushed); 591e948693eSPhilip Paeps pushed = txq->added; 592e948693eSPhilip Paeps } 593e948693eSPhilip Paeps } 594e948693eSPhilip Paeps 595e948693eSPhilip Paeps if (count == 0) { 596e948693eSPhilip Paeps KASSERT(mbuf == NULL, ("mbuf != NULL")); 59793929f25SAndrew Rybchenko KASSERT(non_tcp_count == 0, 59893929f25SAndrew Rybchenko ("inconsistent TCP/non-TCP detection")); 599e948693eSPhilip Paeps stdp->std_get = NULL; 600bc85c897SGeorge V. Neville-Neil stdp->std_get_count = 0; 60193929f25SAndrew Rybchenko stdp->std_get_non_tcp_count = 0; 602e948693eSPhilip Paeps stdp->std_getp = &stdp->std_get; 603e948693eSPhilip Paeps } else { 604e948693eSPhilip Paeps stdp->std_get = mbuf; 605bc85c897SGeorge V. Neville-Neil stdp->std_get_count = count; 60693929f25SAndrew Rybchenko stdp->std_get_non_tcp_count = non_tcp_count; 607e948693eSPhilip Paeps } 608e948693eSPhilip Paeps 609e948693eSPhilip Paeps if (txq->added != pushed) 6103c838a9fSAndrew Rybchenko efx_tx_qpush(txq->common, txq->added, pushed); 611e948693eSPhilip Paeps 612bc85c897SGeorge V. Neville-Neil KASSERT(txq->blocked || stdp->std_get_count == 0, 613e948693eSPhilip Paeps ("queue unblocked but count is non-zero")); 614e948693eSPhilip Paeps } 615e948693eSPhilip Paeps 6163c838a9fSAndrew Rybchenko #define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0) 617e948693eSPhilip Paeps 618e948693eSPhilip Paeps /* 619e948693eSPhilip Paeps * Service the deferred packet list. 620e948693eSPhilip Paeps * 621e948693eSPhilip Paeps * NOTE: drops the txq mutex! 622e948693eSPhilip Paeps */ 6230b28bbdcSAndrew Rybchenko static void 624e948693eSPhilip Paeps sfxge_tx_qdpl_service(struct sfxge_txq *txq) 625e948693eSPhilip Paeps { 626763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 627e948693eSPhilip Paeps 628e948693eSPhilip Paeps do { 629e948693eSPhilip Paeps if (SFXGE_TX_QDPL_PENDING(txq)) 630e948693eSPhilip Paeps sfxge_tx_qdpl_swizzle(txq); 631e948693eSPhilip Paeps 632e948693eSPhilip Paeps if (!txq->blocked) 633e948693eSPhilip Paeps sfxge_tx_qdpl_drain(txq); 634e948693eSPhilip Paeps 635763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 636e948693eSPhilip Paeps } while (SFXGE_TX_QDPL_PENDING(txq) && 637763cab71SAndrew Rybchenko SFXGE_TXQ_TRYLOCK(txq)); 638e948693eSPhilip Paeps } 639e948693eSPhilip Paeps 640e948693eSPhilip Paeps /* 641d6e9f736SAndrew Rybchenko * Put a packet on the deferred packet get-list. 642e948693eSPhilip Paeps */ 6430b28bbdcSAndrew Rybchenko static int 644d6e9f736SAndrew Rybchenko sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf) 645e948693eSPhilip Paeps { 646e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp; 647e948693eSPhilip Paeps 648e948693eSPhilip Paeps stdp = &txq->dpl; 649e948693eSPhilip Paeps 650e948693eSPhilip Paeps KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 651e948693eSPhilip Paeps 652763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 653e948693eSPhilip Paeps 65493929f25SAndrew Rybchenko if (stdp->std_get_count >= stdp->std_get_max) { 65593929f25SAndrew Rybchenko txq->get_overflow++; 656c1974e29SGleb Smirnoff return (ENOBUFS); 65793929f25SAndrew Rybchenko } 65893929f25SAndrew Rybchenko if (sfxge_is_mbuf_non_tcp(mbuf)) { 65993929f25SAndrew Rybchenko if (stdp->std_get_non_tcp_count >= 66093929f25SAndrew Rybchenko stdp->std_get_non_tcp_max) { 66193929f25SAndrew Rybchenko txq->get_non_tcp_overflow++; 66293929f25SAndrew Rybchenko return (ENOBUFS); 66393929f25SAndrew Rybchenko } 66493929f25SAndrew Rybchenko stdp->std_get_non_tcp_count++; 66593929f25SAndrew Rybchenko } 666c1974e29SGleb Smirnoff 667e948693eSPhilip Paeps *(stdp->std_getp) = mbuf; 668e948693eSPhilip Paeps stdp->std_getp = &mbuf->m_nextpkt; 669bc85c897SGeorge V. Neville-Neil stdp->std_get_count++; 670d6e9f736SAndrew Rybchenko 671d6e9f736SAndrew Rybchenko return (0); 672d6e9f736SAndrew Rybchenko } 673d6e9f736SAndrew Rybchenko 674d6e9f736SAndrew Rybchenko /* 675d6e9f736SAndrew Rybchenko * Put a packet on the deferred packet put-list. 676d6e9f736SAndrew Rybchenko * 677d6e9f736SAndrew Rybchenko * We overload the csum_data field in the mbuf to keep track of this length 678d6e9f736SAndrew Rybchenko * because there is no cheap alternative to avoid races. 679d6e9f736SAndrew Rybchenko */ 680d6e9f736SAndrew Rybchenko static int 681d6e9f736SAndrew Rybchenko sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf) 682d6e9f736SAndrew Rybchenko { 683d6e9f736SAndrew Rybchenko struct sfxge_tx_dpl *stdp; 684e948693eSPhilip Paeps volatile uintptr_t *putp; 685e948693eSPhilip Paeps uintptr_t old; 686e948693eSPhilip Paeps uintptr_t new; 6871e2b4cefSAndrew Rybchenko unsigned int put_count; 688e948693eSPhilip Paeps 689d6e9f736SAndrew Rybchenko KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 690d6e9f736SAndrew Rybchenko 691d6e9f736SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 692d6e9f736SAndrew Rybchenko 693d6e9f736SAndrew Rybchenko stdp = &txq->dpl; 694e948693eSPhilip Paeps putp = &stdp->std_put; 695e948693eSPhilip Paeps new = (uintptr_t)mbuf; 696e948693eSPhilip Paeps 697e948693eSPhilip Paeps do { 698e948693eSPhilip Paeps old = *putp; 699b7b0edd1SGeorge V. Neville-Neil if (old != 0) { 700e948693eSPhilip Paeps struct mbuf *mp = (struct mbuf *)old; 7011e2b4cefSAndrew Rybchenko put_count = mp->m_pkthdr.csum_data; 702e948693eSPhilip Paeps } else 7031e2b4cefSAndrew Rybchenko put_count = 0; 7041e2b4cefSAndrew Rybchenko if (put_count >= stdp->std_put_max) { 70593929f25SAndrew Rybchenko atomic_add_long(&txq->put_overflow, 1); 706c1974e29SGleb Smirnoff return (ENOBUFS); 70793929f25SAndrew Rybchenko } 7081e2b4cefSAndrew Rybchenko mbuf->m_pkthdr.csum_data = put_count + 1; 709e948693eSPhilip Paeps mbuf->m_nextpkt = (void *)old; 710fb8ccc78SMarius Strobl } while (atomic_cmpset_ptr(putp, old, new) == 0); 711e948693eSPhilip Paeps 712e948693eSPhilip Paeps return (0); 713e948693eSPhilip Paeps } 714e948693eSPhilip Paeps 715e948693eSPhilip Paeps /* 716e948693eSPhilip Paeps * Called from if_transmit - will try to grab the txq lock and enqueue to the 717c071447aSAndrew Rybchenko * put list if it succeeds, otherwise try to push onto the defer list if space. 718e948693eSPhilip Paeps */ 7193c838a9fSAndrew Rybchenko static int 720e948693eSPhilip Paeps sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) 721e948693eSPhilip Paeps { 722e948693eSPhilip Paeps int rc; 723e948693eSPhilip Paeps 724d7ac87d3SGleb Smirnoff if (!SFXGE_LINK_UP(txq->sc)) { 72593929f25SAndrew Rybchenko atomic_add_long(&txq->netdown_drops, 1); 7260e4ebe6cSAndrew Rybchenko return (ENETDOWN); 727d7ac87d3SGleb Smirnoff } 728d7ac87d3SGleb Smirnoff 729e948693eSPhilip Paeps /* 730e948693eSPhilip Paeps * Try to grab the txq lock. If we are able to get the lock, 731e948693eSPhilip Paeps * the packet will be appended to the "get list" of the deferred 732e948693eSPhilip Paeps * packet list. Otherwise, it will be pushed on the "put list". 733e948693eSPhilip Paeps */ 734deee1de4SAndrew Rybchenko if (SFXGE_TXQ_TRYLOCK(txq)) { 735d6e9f736SAndrew Rybchenko /* First swizzle put-list to get-list to keep order */ 736d6e9f736SAndrew Rybchenko sfxge_tx_qdpl_swizzle(txq); 737d6e9f736SAndrew Rybchenko 738d6e9f736SAndrew Rybchenko rc = sfxge_tx_qdpl_put_locked(txq, m); 739deee1de4SAndrew Rybchenko 740deee1de4SAndrew Rybchenko /* Try to service the list. */ 741deee1de4SAndrew Rybchenko sfxge_tx_qdpl_service(txq); 742deee1de4SAndrew Rybchenko /* Lock has been dropped. */ 743d6e9f736SAndrew Rybchenko } else { 744d6e9f736SAndrew Rybchenko rc = sfxge_tx_qdpl_put_unlocked(txq, m); 745e948693eSPhilip Paeps 746e948693eSPhilip Paeps /* 747e948693eSPhilip Paeps * Try to grab the lock again. 748e948693eSPhilip Paeps * 749d6e9f736SAndrew Rybchenko * If we are able to get the lock, we need to process 750d6e9f736SAndrew Rybchenko * the deferred packet list. If we are not able to get 751d6e9f736SAndrew Rybchenko * the lock, another thread is processing the list. 752e948693eSPhilip Paeps */ 753f080384cSAndrew Rybchenko if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) { 754e948693eSPhilip Paeps sfxge_tx_qdpl_service(txq); 755e948693eSPhilip Paeps /* Lock has been dropped. */ 756e948693eSPhilip Paeps } 757deee1de4SAndrew Rybchenko } 758deee1de4SAndrew Rybchenko 759deee1de4SAndrew Rybchenko SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 760e948693eSPhilip Paeps 761f080384cSAndrew Rybchenko return (rc); 762e948693eSPhilip Paeps } 763e948693eSPhilip Paeps 764e948693eSPhilip Paeps static void 765e948693eSPhilip Paeps sfxge_tx_qdpl_flush(struct sfxge_txq *txq) 766e948693eSPhilip Paeps { 767e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp = &txq->dpl; 768e948693eSPhilip Paeps struct mbuf *mbuf, *next; 769e948693eSPhilip Paeps 770763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 771e948693eSPhilip Paeps 772e948693eSPhilip Paeps sfxge_tx_qdpl_swizzle(txq); 773e948693eSPhilip Paeps for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { 774e948693eSPhilip Paeps next = mbuf->m_nextpkt; 775e948693eSPhilip Paeps m_freem(mbuf); 776e948693eSPhilip Paeps } 777e948693eSPhilip Paeps stdp->std_get = NULL; 778bc85c897SGeorge V. Neville-Neil stdp->std_get_count = 0; 77993929f25SAndrew Rybchenko stdp->std_get_non_tcp_count = 0; 780e948693eSPhilip Paeps stdp->std_getp = &stdp->std_get; 781e948693eSPhilip Paeps 782763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 783e948693eSPhilip Paeps } 784e948693eSPhilip Paeps 785e948693eSPhilip Paeps void 786e948693eSPhilip Paeps sfxge_if_qflush(struct ifnet *ifp) 787e948693eSPhilip Paeps { 788e948693eSPhilip Paeps struct sfxge_softc *sc; 789cb552e88SAndrew Rybchenko unsigned int i; 790e948693eSPhilip Paeps 791e948693eSPhilip Paeps sc = ifp->if_softc; 792e948693eSPhilip Paeps 793e2b05fe2SAndrew Rybchenko for (i = 0; i < sc->txq_count; i++) 794e948693eSPhilip Paeps sfxge_tx_qdpl_flush(sc->txq[i]); 795e948693eSPhilip Paeps } 796e948693eSPhilip Paeps 797a32efb97SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY 798a32efb97SAndrew Rybchenko 799a32efb97SAndrew Rybchenko /* There is little space for user data in mbuf pkthdr, so we 800a32efb97SAndrew Rybchenko * use l*hlen fields which are not used by the driver otherwise 801a32efb97SAndrew Rybchenko * to store header offsets. 802a32efb97SAndrew Rybchenko * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes. 803a32efb97SAndrew Rybchenko */ 804a32efb97SAndrew Rybchenko 805a32efb97SAndrew Rybchenko 806a32efb97SAndrew Rybchenko #define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0]) 807a32efb97SAndrew Rybchenko /* We abuse l5hlen here because PH_loc can hold only 64 bits of data */ 808a32efb97SAndrew Rybchenko #define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen) 809a32efb97SAndrew Rybchenko #define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1]) 810a32efb97SAndrew Rybchenko #define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1]) 811a32efb97SAndrew Rybchenko 812a32efb97SAndrew Rybchenko static void sfxge_parse_tx_packet(struct mbuf *mbuf) 813a32efb97SAndrew Rybchenko { 814a32efb97SAndrew Rybchenko struct ether_header *eh = mtod(mbuf, struct ether_header *); 815a32efb97SAndrew Rybchenko const struct tcphdr *th; 816a32efb97SAndrew Rybchenko struct tcphdr th_copy; 817a32efb97SAndrew Rybchenko 818a32efb97SAndrew Rybchenko /* Find network protocol and header */ 819a32efb97SAndrew Rybchenko TSO_MBUF_PROTO(mbuf) = eh->ether_type; 820a32efb97SAndrew Rybchenko if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) { 821a32efb97SAndrew Rybchenko struct ether_vlan_header *veh = 822a32efb97SAndrew Rybchenko mtod(mbuf, struct ether_vlan_header *); 823a32efb97SAndrew Rybchenko TSO_MBUF_PROTO(mbuf) = veh->evl_proto; 824a32efb97SAndrew Rybchenko mbuf->m_pkthdr.l2hlen = sizeof(*veh); 825a32efb97SAndrew Rybchenko } else { 826a32efb97SAndrew Rybchenko mbuf->m_pkthdr.l2hlen = sizeof(*eh); 827a32efb97SAndrew Rybchenko } 828a32efb97SAndrew Rybchenko 829a32efb97SAndrew Rybchenko /* Find TCP header */ 830a32efb97SAndrew Rybchenko if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) { 831a32efb97SAndrew Rybchenko const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); 832a32efb97SAndrew Rybchenko 833a32efb97SAndrew Rybchenko KASSERT(iph->ip_p == IPPROTO_TCP, 834a32efb97SAndrew Rybchenko ("TSO required on non-TCP packet")); 835a32efb97SAndrew Rybchenko mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; 836a32efb97SAndrew Rybchenko TSO_MBUF_PACKETID(mbuf) = iph->ip_id; 837a32efb97SAndrew Rybchenko } else { 838a32efb97SAndrew Rybchenko KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6), 839a32efb97SAndrew Rybchenko ("TSO required on non-IP packet")); 840a32efb97SAndrew Rybchenko KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == 841a32efb97SAndrew Rybchenko IPPROTO_TCP, 842a32efb97SAndrew Rybchenko ("TSO required on non-TCP packet")); 843a32efb97SAndrew Rybchenko mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); 844a32efb97SAndrew Rybchenko TSO_MBUF_PACKETID(mbuf) = 0; 845a32efb97SAndrew Rybchenko } 846a32efb97SAndrew Rybchenko 847a32efb97SAndrew Rybchenko KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, 848a32efb97SAndrew Rybchenko ("network header is fragmented in mbuf")); 849a32efb97SAndrew Rybchenko 850a32efb97SAndrew Rybchenko /* We need TCP header including flags (window is the next) */ 851a32efb97SAndrew Rybchenko if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { 852a32efb97SAndrew Rybchenko m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), 853a32efb97SAndrew Rybchenko (caddr_t)&th_copy); 854a32efb97SAndrew Rybchenko th = &th_copy; 855a32efb97SAndrew Rybchenko } else { 856a32efb97SAndrew Rybchenko th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); 857a32efb97SAndrew Rybchenko } 858a32efb97SAndrew Rybchenko 859a32efb97SAndrew Rybchenko mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; 860a32efb97SAndrew Rybchenko TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); 861a32efb97SAndrew Rybchenko 862a32efb97SAndrew Rybchenko /* These flags must not be duplicated */ 863a32efb97SAndrew Rybchenko /* 864a32efb97SAndrew Rybchenko * RST should not be duplicated as well, but FreeBSD kernel 865a32efb97SAndrew Rybchenko * generates TSO packets with RST flag. So, do not assert 866a32efb97SAndrew Rybchenko * its absence. 867a32efb97SAndrew Rybchenko */ 868a32efb97SAndrew Rybchenko KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 869a32efb97SAndrew Rybchenko ("incompatible TCP flag 0x%x on TSO packet", 870a32efb97SAndrew Rybchenko th->th_flags & (TH_URG | TH_SYN))); 871a32efb97SAndrew Rybchenko TSO_MBUF_FLAGS(mbuf) = th->th_flags; 872a32efb97SAndrew Rybchenko } 873a32efb97SAndrew Rybchenko #endif 874a32efb97SAndrew Rybchenko 875e948693eSPhilip Paeps /* 876e948693eSPhilip Paeps * TX start -- called by the stack. 877e948693eSPhilip Paeps */ 878e948693eSPhilip Paeps int 879e948693eSPhilip Paeps sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) 880e948693eSPhilip Paeps { 881e948693eSPhilip Paeps struct sfxge_softc *sc; 882e948693eSPhilip Paeps struct sfxge_txq *txq; 883e948693eSPhilip Paeps int rc; 884e948693eSPhilip Paeps 885e948693eSPhilip Paeps sc = (struct sfxge_softc *)ifp->if_softc; 886e948693eSPhilip Paeps 88787581ab8SAndrew Rybchenko /* 88887581ab8SAndrew Rybchenko * Transmit may be called when interface is up from the kernel 88987581ab8SAndrew Rybchenko * point of view, but not yet up (in progress) from the driver 89087581ab8SAndrew Rybchenko * point of view. I.e. link aggregation bring up. 89187581ab8SAndrew Rybchenko * Transmit may be called when interface is up from the driver 89287581ab8SAndrew Rybchenko * point of view, but already down from the kernel point of 89387581ab8SAndrew Rybchenko * view. I.e. Rx when interface shutdown is in progress. 89487581ab8SAndrew Rybchenko */ 89587581ab8SAndrew Rybchenko KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP), 89687581ab8SAndrew Rybchenko ("interface not up")); 897e948693eSPhilip Paeps 898e948693eSPhilip Paeps /* Pick the desired transmit queue. */ 8998b447157SAndrew Rybchenko if (sc->txq_dynamic_cksum_toggle_supported | 9008b447157SAndrew Rybchenko (m->m_pkthdr.csum_flags & 9018b447157SAndrew Rybchenko (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) { 902e948693eSPhilip Paeps int index = 0; 903e948693eSPhilip Paeps 9043bbc1e08SAndrew Rybchenko #ifdef RSS 9053bbc1e08SAndrew Rybchenko uint32_t bucket_id; 9063bbc1e08SAndrew Rybchenko 9073bbc1e08SAndrew Rybchenko /* 9083bbc1e08SAndrew Rybchenko * Select a TX queue which matches the corresponding 9093bbc1e08SAndrew Rybchenko * RX queue for the hash in order to assign both 9103bbc1e08SAndrew Rybchenko * TX and RX parts of the flow to the same CPU 9113bbc1e08SAndrew Rybchenko */ 9123bbc1e08SAndrew Rybchenko if (rss_m2bucket(m, &bucket_id) == 0) 9133bbc1e08SAndrew Rybchenko index = bucket_id % (sc->txq_count - (SFXGE_TXQ_NTYPES - 1)); 9143bbc1e08SAndrew Rybchenko #else 915c2529042SHans Petter Selasky /* check if flowid is set */ 916c2529042SHans Petter Selasky if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 917e948693eSPhilip Paeps uint32_t hash = m->m_pkthdr.flowid; 91844fcad03SAndrew Rybchenko uint32_t idx = hash % nitems(sc->rx_indir_table); 919e948693eSPhilip Paeps 92044fcad03SAndrew Rybchenko index = sc->rx_indir_table[idx]; 921e948693eSPhilip Paeps } 9223bbc1e08SAndrew Rybchenko #endif 923a32efb97SAndrew Rybchenko #if SFXGE_TX_PARSE_EARLY 924a32efb97SAndrew Rybchenko if (m->m_pkthdr.csum_flags & CSUM_TSO) 925a32efb97SAndrew Rybchenko sfxge_parse_tx_packet(m); 926a32efb97SAndrew Rybchenko #endif 9278b447157SAndrew Rybchenko index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ? 9288b447157SAndrew Rybchenko SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0; 9298b447157SAndrew Rybchenko txq = sc->txq[index]; 930e948693eSPhilip Paeps } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 931e948693eSPhilip Paeps txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; 932e948693eSPhilip Paeps } else { 933e948693eSPhilip Paeps txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; 934e948693eSPhilip Paeps } 935e948693eSPhilip Paeps 936e948693eSPhilip Paeps rc = sfxge_tx_packet_add(txq, m); 9370e4ebe6cSAndrew Rybchenko if (rc != 0) 9380e4ebe6cSAndrew Rybchenko m_freem(m); 939e948693eSPhilip Paeps 940e948693eSPhilip Paeps return (rc); 941e948693eSPhilip Paeps } 942e948693eSPhilip Paeps 943e948693eSPhilip Paeps /* 944e948693eSPhilip Paeps * Software "TSO". Not quite as good as doing it in hardware, but 945e948693eSPhilip Paeps * still faster than segmenting in the stack. 946e948693eSPhilip Paeps */ 947e948693eSPhilip Paeps 948e948693eSPhilip Paeps struct sfxge_tso_state { 949e948693eSPhilip Paeps /* Output position */ 950e948693eSPhilip Paeps unsigned out_len; /* Remaining length in current segment */ 951e948693eSPhilip Paeps unsigned seqnum; /* Current sequence number */ 952e948693eSPhilip Paeps unsigned packet_space; /* Remaining space in current packet */ 953a45a0da1SAndrew Rybchenko unsigned segs_space; /* Remaining number of DMA segments 954a45a0da1SAndrew Rybchenko for the packet (FATSOv2 only) */ 955e948693eSPhilip Paeps 956e948693eSPhilip Paeps /* Input position */ 957e948693eSPhilip Paeps uint64_t dma_addr; /* DMA address of current position */ 958e948693eSPhilip Paeps unsigned in_len; /* Remaining length in current mbuf */ 959e948693eSPhilip Paeps 960e948693eSPhilip Paeps const struct mbuf *mbuf; /* Input mbuf (head of chain) */ 961e948693eSPhilip Paeps u_short protocol; /* Network protocol (after VLAN decap) */ 962e948693eSPhilip Paeps ssize_t nh_off; /* Offset of network header */ 963e948693eSPhilip Paeps ssize_t tcph_off; /* Offset of TCP header */ 964e948693eSPhilip Paeps unsigned header_len; /* Number of bytes of header */ 965d0f73877SAndrew Rybchenko unsigned seg_size; /* TCP segment size */ 9663c838a9fSAndrew Rybchenko int fw_assisted; /* Use FW-assisted TSO */ 9673c838a9fSAndrew Rybchenko u_short packet_id; /* IPv4 packet ID from the original packet */ 968a32efb97SAndrew Rybchenko uint8_t tcp_flags; /* TCP flags */ 9693c838a9fSAndrew Rybchenko efx_desc_t header_desc; /* Precomputed header descriptor for 9703c838a9fSAndrew Rybchenko * FW-assisted TSO */ 971e948693eSPhilip Paeps }; 972e948693eSPhilip Paeps 973a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 9740b28bbdcSAndrew Rybchenko static const struct ip *tso_iph(const struct sfxge_tso_state *tso) 975e948693eSPhilip Paeps { 976e948693eSPhilip Paeps KASSERT(tso->protocol == htons(ETHERTYPE_IP), 977e948693eSPhilip Paeps ("tso_iph() in non-IPv4 state")); 978e948693eSPhilip Paeps return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); 979e948693eSPhilip Paeps } 980a32efb97SAndrew Rybchenko 9810b28bbdcSAndrew Rybchenko static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) 982e948693eSPhilip Paeps { 983e948693eSPhilip Paeps KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 984e948693eSPhilip Paeps ("tso_ip6h() in non-IPv6 state")); 985e948693eSPhilip Paeps return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); 986e948693eSPhilip Paeps } 987a32efb97SAndrew Rybchenko 9880b28bbdcSAndrew Rybchenko static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) 989e948693eSPhilip Paeps { 990e948693eSPhilip Paeps return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); 991e948693eSPhilip Paeps } 992a32efb97SAndrew Rybchenko #endif 993a32efb97SAndrew Rybchenko 994e948693eSPhilip Paeps 995e948693eSPhilip Paeps /* Size of preallocated TSO header buffers. Larger blocks must be 996e948693eSPhilip Paeps * allocated from the heap. 997e948693eSPhilip Paeps */ 998e948693eSPhilip Paeps #define TSOH_STD_SIZE 128 999e948693eSPhilip Paeps 1000e948693eSPhilip Paeps /* At most half the descriptors in the queue at any time will refer to 1001e948693eSPhilip Paeps * a TSO header buffer, since they must always be followed by a 1002e948693eSPhilip Paeps * payload descriptor referring to an mbuf. 1003e948693eSPhilip Paeps */ 1004385b1d8eSGeorge V. Neville-Neil #define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) 1005e948693eSPhilip Paeps #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 1006385b1d8eSGeorge V. Neville-Neil #define TSOH_PAGE_COUNT(_txq_entries) \ 1007057b4402SPedro F. Giffuni howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE) 1008e948693eSPhilip Paeps 1009e948693eSPhilip Paeps static int tso_init(struct sfxge_txq *txq) 1010e948693eSPhilip Paeps { 1011e948693eSPhilip Paeps struct sfxge_softc *sc = txq->sc; 1012385b1d8eSGeorge V. Neville-Neil unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); 1013e948693eSPhilip Paeps int i, rc; 1014e948693eSPhilip Paeps 1015e948693eSPhilip Paeps /* Allocate TSO header buffers */ 1016385b1d8eSGeorge V. Neville-Neil txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), 1017e948693eSPhilip Paeps M_SFXGE, M_WAITOK); 1018e948693eSPhilip Paeps 1019385b1d8eSGeorge V. Neville-Neil for (i = 0; i < tsoh_page_count; i++) { 1020e948693eSPhilip Paeps rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); 1021b7b0edd1SGeorge V. Neville-Neil if (rc != 0) 1022e948693eSPhilip Paeps goto fail; 1023e948693eSPhilip Paeps } 1024e948693eSPhilip Paeps 1025b7b0edd1SGeorge V. Neville-Neil return (0); 1026e948693eSPhilip Paeps 1027e948693eSPhilip Paeps fail: 1028e948693eSPhilip Paeps while (i-- > 0) 1029e948693eSPhilip Paeps sfxge_dma_free(&txq->tsoh_buffer[i]); 1030e948693eSPhilip Paeps free(txq->tsoh_buffer, M_SFXGE); 1031e948693eSPhilip Paeps txq->tsoh_buffer = NULL; 1032b7b0edd1SGeorge V. Neville-Neil return (rc); 1033e948693eSPhilip Paeps } 1034e948693eSPhilip Paeps 1035e948693eSPhilip Paeps static void tso_fini(struct sfxge_txq *txq) 1036e948693eSPhilip Paeps { 1037e948693eSPhilip Paeps int i; 1038e948693eSPhilip Paeps 1039b7b0edd1SGeorge V. Neville-Neil if (txq->tsoh_buffer != NULL) { 1040385b1d8eSGeorge V. Neville-Neil for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) 1041e948693eSPhilip Paeps sfxge_dma_free(&txq->tsoh_buffer[i]); 1042e948693eSPhilip Paeps free(txq->tsoh_buffer, M_SFXGE); 1043e948693eSPhilip Paeps } 1044e948693eSPhilip Paeps } 1045e948693eSPhilip Paeps 10463c838a9fSAndrew Rybchenko static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso, 10473c838a9fSAndrew Rybchenko const bus_dma_segment_t *hdr_dma_seg, 10483c838a9fSAndrew Rybchenko struct mbuf *mbuf) 1049e948693eSPhilip Paeps { 10503c838a9fSAndrew Rybchenko const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); 1051a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1052a32efb97SAndrew Rybchenko struct ether_header *eh = mtod(mbuf, struct ether_header *); 1053b2c43c38SAndrew Rybchenko const struct tcphdr *th; 1054b2c43c38SAndrew Rybchenko struct tcphdr th_copy; 1055a32efb97SAndrew Rybchenko #endif 1056e948693eSPhilip Paeps 1057a45a0da1SAndrew Rybchenko tso->fw_assisted = txq->tso_fw_assisted; 1058e948693eSPhilip Paeps tso->mbuf = mbuf; 1059e948693eSPhilip Paeps 1060e948693eSPhilip Paeps /* Find network protocol and header */ 1061a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1062e948693eSPhilip Paeps tso->protocol = eh->ether_type; 1063e948693eSPhilip Paeps if (tso->protocol == htons(ETHERTYPE_VLAN)) { 1064e948693eSPhilip Paeps struct ether_vlan_header *veh = 1065e948693eSPhilip Paeps mtod(mbuf, struct ether_vlan_header *); 1066e948693eSPhilip Paeps tso->protocol = veh->evl_proto; 1067e948693eSPhilip Paeps tso->nh_off = sizeof(*veh); 1068e948693eSPhilip Paeps } else { 1069e948693eSPhilip Paeps tso->nh_off = sizeof(*eh); 1070e948693eSPhilip Paeps } 1071a32efb97SAndrew Rybchenko #else 1072a32efb97SAndrew Rybchenko tso->protocol = TSO_MBUF_PROTO(mbuf); 1073a32efb97SAndrew Rybchenko tso->nh_off = mbuf->m_pkthdr.l2hlen; 1074a32efb97SAndrew Rybchenko tso->tcph_off = mbuf->m_pkthdr.l3hlen; 1075414dec5fSAndrew Rybchenko tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf)); 1076a32efb97SAndrew Rybchenko #endif 1077e948693eSPhilip Paeps 1078a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1079e948693eSPhilip Paeps /* Find TCP header */ 1080e948693eSPhilip Paeps if (tso->protocol == htons(ETHERTYPE_IP)) { 1081e948693eSPhilip Paeps KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, 1082e948693eSPhilip Paeps ("TSO required on non-TCP packet")); 1083e948693eSPhilip Paeps tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; 1084414dec5fSAndrew Rybchenko tso->packet_id = ntohs(tso_iph(tso)->ip_id); 1085e948693eSPhilip Paeps } else { 1086e948693eSPhilip Paeps KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 1087e948693eSPhilip Paeps ("TSO required on non-IP packet")); 1088e948693eSPhilip Paeps KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, 1089e948693eSPhilip Paeps ("TSO required on non-TCP packet")); 1090e948693eSPhilip Paeps tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); 10913c838a9fSAndrew Rybchenko tso->packet_id = 0; 10923c838a9fSAndrew Rybchenko } 1093a32efb97SAndrew Rybchenko #endif 1094a32efb97SAndrew Rybchenko 1095a32efb97SAndrew Rybchenko 10963c838a9fSAndrew Rybchenko if (tso->fw_assisted && 10973c838a9fSAndrew Rybchenko __predict_false(tso->tcph_off > 10983c838a9fSAndrew Rybchenko encp->enc_tx_tso_tcp_header_offset_limit)) { 10993c838a9fSAndrew Rybchenko tso->fw_assisted = 0; 1100e948693eSPhilip Paeps } 1101e948693eSPhilip Paeps 1102a32efb97SAndrew Rybchenko 1103a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1104b2c43c38SAndrew Rybchenko KASSERT(mbuf->m_len >= tso->tcph_off, 1105b2c43c38SAndrew Rybchenko ("network header is fragmented in mbuf")); 1106b2c43c38SAndrew Rybchenko /* We need TCP header including flags (window is the next) */ 1107b2c43c38SAndrew Rybchenko if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { 1108b2c43c38SAndrew Rybchenko m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), 1109b2c43c38SAndrew Rybchenko (caddr_t)&th_copy); 1110b2c43c38SAndrew Rybchenko th = &th_copy; 1111b2c43c38SAndrew Rybchenko } else { 1112b2c43c38SAndrew Rybchenko th = tso_tcph(tso); 1113b2c43c38SAndrew Rybchenko } 1114b2c43c38SAndrew Rybchenko tso->header_len = tso->tcph_off + 4 * th->th_off; 1115a32efb97SAndrew Rybchenko #else 1116a32efb97SAndrew Rybchenko tso->header_len = mbuf->m_pkthdr.l4hlen; 1117a32efb97SAndrew Rybchenko #endif 1118d0f73877SAndrew Rybchenko tso->seg_size = mbuf->m_pkthdr.tso_segsz; 1119e948693eSPhilip Paeps 1120a32efb97SAndrew Rybchenko #if !SFXGE_TX_PARSE_EARLY 1121b2c43c38SAndrew Rybchenko tso->seqnum = ntohl(th->th_seq); 1122e948693eSPhilip Paeps 1123e948693eSPhilip Paeps /* These flags must not be duplicated */ 11241217b24eSAndrew Rybchenko /* 11251217b24eSAndrew Rybchenko * RST should not be duplicated as well, but FreeBSD kernel 11261217b24eSAndrew Rybchenko * generates TSO packets with RST flag. So, do not assert 11271217b24eSAndrew Rybchenko * its absence. 11281217b24eSAndrew Rybchenko */ 11291217b24eSAndrew Rybchenko KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 11301217b24eSAndrew Rybchenko ("incompatible TCP flag 0x%x on TSO packet", 11311217b24eSAndrew Rybchenko th->th_flags & (TH_URG | TH_SYN))); 1132a32efb97SAndrew Rybchenko tso->tcp_flags = th->th_flags; 1133a32efb97SAndrew Rybchenko #else 1134a32efb97SAndrew Rybchenko tso->seqnum = TSO_MBUF_SEQNUM(mbuf); 1135a32efb97SAndrew Rybchenko tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); 1136a32efb97SAndrew Rybchenko #endif 1137e948693eSPhilip Paeps 1138e948693eSPhilip Paeps tso->out_len = mbuf->m_pkthdr.len - tso->header_len; 11393c838a9fSAndrew Rybchenko 11403c838a9fSAndrew Rybchenko if (tso->fw_assisted) { 11413c838a9fSAndrew Rybchenko if (hdr_dma_seg->ds_len >= tso->header_len) 11423c838a9fSAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, 11433c838a9fSAndrew Rybchenko hdr_dma_seg->ds_addr, 11443c838a9fSAndrew Rybchenko tso->header_len, 11453c838a9fSAndrew Rybchenko B_FALSE, 11463c838a9fSAndrew Rybchenko &tso->header_desc); 11473c838a9fSAndrew Rybchenko else 11483c838a9fSAndrew Rybchenko tso->fw_assisted = 0; 11493c838a9fSAndrew Rybchenko } 1150e948693eSPhilip Paeps } 1151e948693eSPhilip Paeps 1152e948693eSPhilip Paeps /* 1153e948693eSPhilip Paeps * tso_fill_packet_with_fragment - form descriptors for the current fragment 1154e948693eSPhilip Paeps * 1155e948693eSPhilip Paeps * Form descriptors for the current fragment, until we reach the end 1156e948693eSPhilip Paeps * of fragment or end-of-packet. Return 0 on success, 1 if not enough 1157e948693eSPhilip Paeps * space. 1158e948693eSPhilip Paeps */ 1159e948693eSPhilip Paeps static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, 1160e948693eSPhilip Paeps struct sfxge_tso_state *tso) 1161e948693eSPhilip Paeps { 11623c838a9fSAndrew Rybchenko efx_desc_t *desc; 1163e948693eSPhilip Paeps int n; 1164a45a0da1SAndrew Rybchenko uint64_t dma_addr = tso->dma_addr; 1165a45a0da1SAndrew Rybchenko boolean_t eop; 1166e948693eSPhilip Paeps 1167e948693eSPhilip Paeps if (tso->in_len == 0 || tso->packet_space == 0) 1168e948693eSPhilip Paeps return; 1169e948693eSPhilip Paeps 1170e948693eSPhilip Paeps KASSERT(tso->in_len > 0, ("TSO input length went negative")); 1171e948693eSPhilip Paeps KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); 1172e948693eSPhilip Paeps 1173a45a0da1SAndrew Rybchenko if (tso->fw_assisted & SFXGE_FATSOV2) { 1174a45a0da1SAndrew Rybchenko n = tso->in_len; 1175a45a0da1SAndrew Rybchenko tso->out_len -= n; 1176a45a0da1SAndrew Rybchenko tso->seqnum += n; 1177a45a0da1SAndrew Rybchenko tso->in_len = 0; 1178a45a0da1SAndrew Rybchenko if (n < tso->packet_space) { 1179a45a0da1SAndrew Rybchenko tso->packet_space -= n; 1180a45a0da1SAndrew Rybchenko tso->segs_space--; 1181a45a0da1SAndrew Rybchenko } else { 1182a45a0da1SAndrew Rybchenko tso->packet_space = tso->seg_size - 1183a45a0da1SAndrew Rybchenko (n - tso->packet_space) % tso->seg_size; 1184a45a0da1SAndrew Rybchenko tso->segs_space = 1185a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - 1186a45a0da1SAndrew Rybchenko (tso->packet_space != tso->seg_size); 1187a45a0da1SAndrew Rybchenko } 1188a45a0da1SAndrew Rybchenko } else { 1189e948693eSPhilip Paeps n = min(tso->in_len, tso->packet_space); 1190e948693eSPhilip Paeps tso->packet_space -= n; 1191e948693eSPhilip Paeps tso->out_len -= n; 1192a45a0da1SAndrew Rybchenko tso->dma_addr += n; 1193e948693eSPhilip Paeps tso->in_len -= n; 1194a45a0da1SAndrew Rybchenko } 1195a45a0da1SAndrew Rybchenko 1196a45a0da1SAndrew Rybchenko /* 1197a45a0da1SAndrew Rybchenko * It is OK to use binary OR below to avoid extra branching 1198a45a0da1SAndrew Rybchenko * since all conditions may always be checked. 1199a45a0da1SAndrew Rybchenko */ 1200a45a0da1SAndrew Rybchenko eop = (tso->out_len == 0) | (tso->packet_space == 0) | 1201a45a0da1SAndrew Rybchenko (tso->segs_space == 0); 1202e948693eSPhilip Paeps 1203e948693eSPhilip Paeps desc = &txq->pend_desc[txq->n_pend_desc++]; 1204a45a0da1SAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); 1205e948693eSPhilip Paeps } 1206e948693eSPhilip Paeps 1207e948693eSPhilip Paeps /* Callback from bus_dmamap_load() for long TSO headers. */ 1208e948693eSPhilip Paeps static void tso_map_long_header(void *dma_addr_ret, 1209e948693eSPhilip Paeps bus_dma_segment_t *segs, int nseg, 1210e948693eSPhilip Paeps int error) 1211e948693eSPhilip Paeps { 1212e948693eSPhilip Paeps *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && 1213e948693eSPhilip Paeps __predict_true(nseg == 1)) ? 1214e948693eSPhilip Paeps segs->ds_addr : 0); 1215e948693eSPhilip Paeps } 1216e948693eSPhilip Paeps 1217e948693eSPhilip Paeps /* 1218e948693eSPhilip Paeps * tso_start_new_packet - generate a new header and prepare for the new packet 1219e948693eSPhilip Paeps * 1220e948693eSPhilip Paeps * Generate a new header and prepare for the new packet. Return 0 on 1221e948693eSPhilip Paeps * success, or an error code if failed to alloc header. 1222e948693eSPhilip Paeps */ 1223e948693eSPhilip Paeps static int tso_start_new_packet(struct sfxge_txq *txq, 1224e948693eSPhilip Paeps struct sfxge_tso_state *tso, 12253c838a9fSAndrew Rybchenko unsigned int *idp) 1226e948693eSPhilip Paeps { 12273c838a9fSAndrew Rybchenko unsigned int id = *idp; 1228e948693eSPhilip Paeps struct tcphdr *tsoh_th; 1229e948693eSPhilip Paeps unsigned ip_length; 1230e948693eSPhilip Paeps caddr_t header; 1231e948693eSPhilip Paeps uint64_t dma_addr; 1232e948693eSPhilip Paeps bus_dmamap_t map; 12333c838a9fSAndrew Rybchenko efx_desc_t *desc; 1234e948693eSPhilip Paeps int rc; 1235e948693eSPhilip Paeps 12363c838a9fSAndrew Rybchenko if (tso->fw_assisted) { 1237a45a0da1SAndrew Rybchenko if (tso->fw_assisted & SFXGE_FATSOV2) { 1238a45a0da1SAndrew Rybchenko /* Add 2 FATSOv2 option descriptors */ 1239a45a0da1SAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc]; 1240a45a0da1SAndrew Rybchenko efx_tx_qdesc_tso2_create(txq->common, 1241a45a0da1SAndrew Rybchenko tso->packet_id, 12424142e8cfSAndrew Rybchenko 0, 1243a45a0da1SAndrew Rybchenko tso->seqnum, 1244a45a0da1SAndrew Rybchenko tso->seg_size, 1245a45a0da1SAndrew Rybchenko desc, 1246a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_OPT_NDESCS); 1247a45a0da1SAndrew Rybchenko desc += EFX_TX_FATSOV2_OPT_NDESCS; 1248a45a0da1SAndrew Rybchenko txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; 1249a45a0da1SAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1250a45a0da1SAndrew Rybchenko id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; 1251a45a0da1SAndrew Rybchenko 1252a45a0da1SAndrew Rybchenko tso->segs_space = 1253a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; 1254a45a0da1SAndrew Rybchenko } else { 1255a32efb97SAndrew Rybchenko uint8_t tcp_flags = tso->tcp_flags; 12563c838a9fSAndrew Rybchenko 12573c838a9fSAndrew Rybchenko if (tso->out_len > tso->seg_size) 12583c838a9fSAndrew Rybchenko tcp_flags &= ~(TH_FIN | TH_PUSH); 12593c838a9fSAndrew Rybchenko 1260a45a0da1SAndrew Rybchenko /* Add FATSOv1 option descriptor */ 12613c838a9fSAndrew Rybchenko desc = &txq->pend_desc[txq->n_pend_desc++]; 12623c838a9fSAndrew Rybchenko efx_tx_qdesc_tso_create(txq->common, 12633c838a9fSAndrew Rybchenko tso->packet_id, 12643c838a9fSAndrew Rybchenko tso->seqnum, 12653c838a9fSAndrew Rybchenko tcp_flags, 12663c838a9fSAndrew Rybchenko desc++); 12673c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 12683c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 12693c838a9fSAndrew Rybchenko 1270a45a0da1SAndrew Rybchenko tso->seqnum += tso->seg_size; 1271a45a0da1SAndrew Rybchenko tso->segs_space = UINT_MAX; 1272a45a0da1SAndrew Rybchenko } 1273a45a0da1SAndrew Rybchenko 12743c838a9fSAndrew Rybchenko /* Header DMA descriptor */ 12753c838a9fSAndrew Rybchenko *desc = tso->header_desc; 12763c838a9fSAndrew Rybchenko txq->n_pend_desc++; 12773c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 12783c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 12793c838a9fSAndrew Rybchenko } else { 1280e948693eSPhilip Paeps /* Allocate a DMA-mapped header buffer. */ 1281e948693eSPhilip Paeps if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { 1282e948693eSPhilip Paeps unsigned int page_index = (id / 2) / TSOH_PER_PAGE; 1283e948693eSPhilip Paeps unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; 1284e948693eSPhilip Paeps 1285e948693eSPhilip Paeps header = (txq->tsoh_buffer[page_index].esm_base + 1286e948693eSPhilip Paeps buf_index * TSOH_STD_SIZE); 1287e948693eSPhilip Paeps dma_addr = (txq->tsoh_buffer[page_index].esm_addr + 1288e948693eSPhilip Paeps buf_index * TSOH_STD_SIZE); 1289e948693eSPhilip Paeps map = txq->tsoh_buffer[page_index].esm_map; 1290e948693eSPhilip Paeps 12913c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, 12923c838a9fSAndrew Rybchenko ("stmp flags are not 0")); 1293e948693eSPhilip Paeps } else { 12943c838a9fSAndrew Rybchenko struct sfxge_tx_mapping *stmp = &txq->stmp[id]; 12953c838a9fSAndrew Rybchenko 1296e948693eSPhilip Paeps /* We cannot use bus_dmamem_alloc() as that may sleep */ 1297e948693eSPhilip Paeps header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); 1298e948693eSPhilip Paeps if (__predict_false(!header)) 1299b7b0edd1SGeorge V. Neville-Neil return (ENOMEM); 1300e948693eSPhilip Paeps rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, 1301e948693eSPhilip Paeps header, tso->header_len, 1302e948693eSPhilip Paeps tso_map_long_header, &dma_addr, 1303e948693eSPhilip Paeps BUS_DMA_NOWAIT); 1304e948693eSPhilip Paeps if (__predict_false(dma_addr == 0)) { 1305e948693eSPhilip Paeps if (rc == 0) { 1306e948693eSPhilip Paeps /* Succeeded but got >1 segment */ 1307e948693eSPhilip Paeps bus_dmamap_unload(txq->packet_dma_tag, 1308e948693eSPhilip Paeps stmp->map); 1309e948693eSPhilip Paeps rc = EINVAL; 1310e948693eSPhilip Paeps } 1311e948693eSPhilip Paeps free(header, M_SFXGE); 1312b7b0edd1SGeorge V. Neville-Neil return (rc); 1313e948693eSPhilip Paeps } 1314e948693eSPhilip Paeps map = stmp->map; 1315e948693eSPhilip Paeps 1316e948693eSPhilip Paeps txq->tso_long_headers++; 1317e948693eSPhilip Paeps stmp->u.heap_buf = header; 1318e948693eSPhilip Paeps stmp->flags = TX_BUF_UNMAP; 1319e948693eSPhilip Paeps } 1320e948693eSPhilip Paeps 1321e948693eSPhilip Paeps tsoh_th = (struct tcphdr *)(header + tso->tcph_off); 1322e948693eSPhilip Paeps 1323e948693eSPhilip Paeps /* Copy and update the headers. */ 1324a35485aaSAndrew Rybchenko m_copydata(tso->mbuf, 0, tso->header_len, header); 1325e948693eSPhilip Paeps 1326e948693eSPhilip Paeps tsoh_th->th_seq = htonl(tso->seqnum); 1327d0f73877SAndrew Rybchenko tso->seqnum += tso->seg_size; 1328d0f73877SAndrew Rybchenko if (tso->out_len > tso->seg_size) { 1329e948693eSPhilip Paeps /* This packet will not finish the TSO burst. */ 1330d0f73877SAndrew Rybchenko ip_length = tso->header_len - tso->nh_off + tso->seg_size; 1331e948693eSPhilip Paeps tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); 1332e948693eSPhilip Paeps } else { 1333e948693eSPhilip Paeps /* This packet will be the last in the TSO burst. */ 1334e948693eSPhilip Paeps ip_length = tso->header_len - tso->nh_off + tso->out_len; 1335e948693eSPhilip Paeps } 1336e948693eSPhilip Paeps 1337e948693eSPhilip Paeps if (tso->protocol == htons(ETHERTYPE_IP)) { 1338e948693eSPhilip Paeps struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); 1339e948693eSPhilip Paeps tsoh_iph->ip_len = htons(ip_length); 1340e948693eSPhilip Paeps /* XXX We should increment ip_id, but FreeBSD doesn't 1341e948693eSPhilip Paeps * currently allocate extra IDs for multiple segments. 1342e948693eSPhilip Paeps */ 1343e948693eSPhilip Paeps } else { 1344e948693eSPhilip Paeps struct ip6_hdr *tsoh_iph = 1345e948693eSPhilip Paeps (struct ip6_hdr *)(header + tso->nh_off); 1346e948693eSPhilip Paeps tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); 1347e948693eSPhilip Paeps } 1348e948693eSPhilip Paeps 1349e948693eSPhilip Paeps /* Make the header visible to the hardware. */ 1350e948693eSPhilip Paeps bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); 1351e948693eSPhilip Paeps 1352e948693eSPhilip Paeps /* Form a descriptor for this header. */ 1353e948693eSPhilip Paeps desc = &txq->pend_desc[txq->n_pend_desc++]; 13543c838a9fSAndrew Rybchenko efx_tx_qdesc_dma_create(txq->common, 13553c838a9fSAndrew Rybchenko dma_addr, 13563c838a9fSAndrew Rybchenko tso->header_len, 13573c838a9fSAndrew Rybchenko 0, 13583c838a9fSAndrew Rybchenko desc); 13593c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 1360a45a0da1SAndrew Rybchenko 1361a45a0da1SAndrew Rybchenko tso->segs_space = UINT_MAX; 13623c838a9fSAndrew Rybchenko } 13633c838a9fSAndrew Rybchenko tso->packet_space = tso->seg_size; 13643c838a9fSAndrew Rybchenko txq->tso_packets++; 13653c838a9fSAndrew Rybchenko *idp = id; 1366e948693eSPhilip Paeps 1367b7b0edd1SGeorge V. Neville-Neil return (0); 1368e948693eSPhilip Paeps } 1369e948693eSPhilip Paeps 1370e948693eSPhilip Paeps static int 1371e948693eSPhilip Paeps sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 13723c838a9fSAndrew Rybchenko const bus_dma_segment_t *dma_seg, int n_dma_seg, 13738b447157SAndrew Rybchenko int n_extra_descs) 1374e948693eSPhilip Paeps { 1375e948693eSPhilip Paeps struct sfxge_tso_state tso; 13763c838a9fSAndrew Rybchenko unsigned int id; 1377a35485aaSAndrew Rybchenko unsigned skipped = 0; 1378e948693eSPhilip Paeps 13793c838a9fSAndrew Rybchenko tso_start(txq, &tso, dma_seg, mbuf); 1380e948693eSPhilip Paeps 1381a35485aaSAndrew Rybchenko while (dma_seg->ds_len + skipped <= tso.header_len) { 1382a35485aaSAndrew Rybchenko skipped += dma_seg->ds_len; 1383e948693eSPhilip Paeps --n_dma_seg; 1384e948693eSPhilip Paeps KASSERT(n_dma_seg, ("no payload found in TSO packet")); 1385e948693eSPhilip Paeps ++dma_seg; 1386e948693eSPhilip Paeps } 1387cfaf34ffSAndrew Rybchenko tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); 1388a35485aaSAndrew Rybchenko tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); 1389e948693eSPhilip Paeps 13908b447157SAndrew Rybchenko id = (txq->added + n_extra_descs) & txq->ptr_mask; 13913c838a9fSAndrew Rybchenko if (__predict_false(tso_start_new_packet(txq, &tso, &id))) 1392385b1d8eSGeorge V. Neville-Neil return (-1); 1393e948693eSPhilip Paeps 1394e948693eSPhilip Paeps while (1) { 1395e948693eSPhilip Paeps tso_fill_packet_with_fragment(txq, &tso); 13963c838a9fSAndrew Rybchenko /* Exactly one DMA descriptor is added */ 13973c838a9fSAndrew Rybchenko KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 13983c838a9fSAndrew Rybchenko id = (id + 1) & txq->ptr_mask; 1399e948693eSPhilip Paeps 1400e948693eSPhilip Paeps /* Move onto the next fragment? */ 1401e948693eSPhilip Paeps if (tso.in_len == 0) { 1402e948693eSPhilip Paeps --n_dma_seg; 1403e948693eSPhilip Paeps if (n_dma_seg == 0) 1404e948693eSPhilip Paeps break; 1405e948693eSPhilip Paeps ++dma_seg; 1406e948693eSPhilip Paeps tso.in_len = dma_seg->ds_len; 1407e948693eSPhilip Paeps tso.dma_addr = dma_seg->ds_addr; 1408e948693eSPhilip Paeps } 1409e948693eSPhilip Paeps 1410e948693eSPhilip Paeps /* End of packet? */ 1411a45a0da1SAndrew Rybchenko if ((tso.packet_space == 0) | (tso.segs_space == 0)) { 1412a45a0da1SAndrew Rybchenko unsigned int n_fatso_opt_desc = 1413a45a0da1SAndrew Rybchenko (tso.fw_assisted & SFXGE_FATSOV2) ? 1414a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_OPT_NDESCS : 1415a45a0da1SAndrew Rybchenko (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; 1416a45a0da1SAndrew Rybchenko 1417e948693eSPhilip Paeps /* If the queue is now full due to tiny MSS, 1418e948693eSPhilip Paeps * or we can't create another header, discard 1419e948693eSPhilip Paeps * the remainder of the input mbuf but do not 1420e948693eSPhilip Paeps * roll back the work we have done. 1421e948693eSPhilip Paeps */ 1422a45a0da1SAndrew Rybchenko if (txq->n_pend_desc + n_fatso_opt_desc + 1423a45a0da1SAndrew Rybchenko 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { 1424e1a3d10eSAndrew Rybchenko txq->tso_pdrop_too_many++; 1425e948693eSPhilip Paeps break; 1426e1a3d10eSAndrew Rybchenko } 1427e948693eSPhilip Paeps if (__predict_false(tso_start_new_packet(txq, &tso, 14283c838a9fSAndrew Rybchenko &id))) { 1429e1a3d10eSAndrew Rybchenko txq->tso_pdrop_no_rsrc++; 1430e948693eSPhilip Paeps break; 1431e1a3d10eSAndrew Rybchenko } 1432e948693eSPhilip Paeps } 1433e948693eSPhilip Paeps } 1434e948693eSPhilip Paeps 1435e948693eSPhilip Paeps txq->tso_bursts++; 1436b7b0edd1SGeorge V. Neville-Neil return (id); 1437e948693eSPhilip Paeps } 1438e948693eSPhilip Paeps 1439e948693eSPhilip Paeps static void 1440e948693eSPhilip Paeps sfxge_tx_qunblock(struct sfxge_txq *txq) 1441e948693eSPhilip Paeps { 1442e948693eSPhilip Paeps struct sfxge_softc *sc; 1443e948693eSPhilip Paeps struct sfxge_evq *evq; 1444e948693eSPhilip Paeps 1445e948693eSPhilip Paeps sc = txq->sc; 1446e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1447e948693eSPhilip Paeps 1448763cab71SAndrew Rybchenko SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 1449e948693eSPhilip Paeps 1450851128b8SAndrew Rybchenko if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) 1451e948693eSPhilip Paeps return; 1452e948693eSPhilip Paeps 1453763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1454e948693eSPhilip Paeps 1455e948693eSPhilip Paeps if (txq->blocked) { 1456e948693eSPhilip Paeps unsigned int level; 1457e948693eSPhilip Paeps 1458e948693eSPhilip Paeps level = txq->added - txq->completed; 14596d73545eSAndrew Rybchenko if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { 14606d73545eSAndrew Rybchenko /* reaped must be in sync with blocked */ 14616d73545eSAndrew Rybchenko sfxge_tx_qreap(txq); 1462e948693eSPhilip Paeps txq->blocked = 0; 1463e948693eSPhilip Paeps } 14646d73545eSAndrew Rybchenko } 1465e948693eSPhilip Paeps 1466e948693eSPhilip Paeps sfxge_tx_qdpl_service(txq); 1467e948693eSPhilip Paeps /* note: lock has been dropped */ 1468e948693eSPhilip Paeps } 1469e948693eSPhilip Paeps 1470e948693eSPhilip Paeps void 1471e948693eSPhilip Paeps sfxge_tx_qflush_done(struct sfxge_txq *txq) 1472e948693eSPhilip Paeps { 1473e948693eSPhilip Paeps 1474e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_DONE; 1475e948693eSPhilip Paeps } 1476e948693eSPhilip Paeps 1477e948693eSPhilip Paeps static void 1478e948693eSPhilip Paeps sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) 1479e948693eSPhilip Paeps { 1480e948693eSPhilip Paeps struct sfxge_txq *txq; 1481e948693eSPhilip Paeps struct sfxge_evq *evq; 1482e948693eSPhilip Paeps unsigned int count; 1483e948693eSPhilip Paeps 14843c838a9fSAndrew Rybchenko SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 14853c838a9fSAndrew Rybchenko 1486e948693eSPhilip Paeps txq = sc->txq[index]; 1487e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1488e948693eSPhilip Paeps 14893c838a9fSAndrew Rybchenko SFXGE_EVQ_LOCK(evq); 1490763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1491e948693eSPhilip Paeps 1492e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_STARTED, 1493e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_STARTED")); 1494e948693eSPhilip Paeps 1495e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_INITIALIZED; 14963c838a9fSAndrew Rybchenko 14973c838a9fSAndrew Rybchenko if (txq->flush_state != SFXGE_FLUSH_DONE) { 1498e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_PENDING; 1499e948693eSPhilip Paeps 15003c838a9fSAndrew Rybchenko SFXGE_EVQ_UNLOCK(evq); 1501763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1502e948693eSPhilip Paeps 15033c838a9fSAndrew Rybchenko /* Flush the transmit queue. */ 15043c838a9fSAndrew Rybchenko if (efx_tx_qflush(txq->common) != 0) { 15053c838a9fSAndrew Rybchenko log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", 15063c838a9fSAndrew Rybchenko device_get_nameunit(sc->dev), index); 15073c838a9fSAndrew Rybchenko txq->flush_state = SFXGE_FLUSH_DONE; 15083c838a9fSAndrew Rybchenko } else { 1509e948693eSPhilip Paeps count = 0; 1510e948693eSPhilip Paeps do { 1511e948693eSPhilip Paeps /* Spin for 100ms. */ 1512e948693eSPhilip Paeps DELAY(100000); 1513e948693eSPhilip Paeps if (txq->flush_state != SFXGE_FLUSH_PENDING) 1514e948693eSPhilip Paeps break; 1515e948693eSPhilip Paeps } while (++count < 20); 15163c838a9fSAndrew Rybchenko } 1517763cab71SAndrew Rybchenko SFXGE_EVQ_LOCK(evq); 1518763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1519e948693eSPhilip Paeps 1520e948693eSPhilip Paeps KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, 1521e948693eSPhilip Paeps ("txq->flush_state == SFXGE_FLUSH_FAILED")); 1522e948693eSPhilip Paeps 15233c838a9fSAndrew Rybchenko if (txq->flush_state != SFXGE_FLUSH_DONE) { 15243c838a9fSAndrew Rybchenko /* Flush timeout */ 15253c838a9fSAndrew Rybchenko log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", 15263c838a9fSAndrew Rybchenko device_get_nameunit(sc->dev), index); 1527e948693eSPhilip Paeps txq->flush_state = SFXGE_FLUSH_DONE; 15283c838a9fSAndrew Rybchenko } 15293c838a9fSAndrew Rybchenko } 1530e948693eSPhilip Paeps 1531e948693eSPhilip Paeps txq->blocked = 0; 1532e948693eSPhilip Paeps txq->pending = txq->added; 1533e948693eSPhilip Paeps 1534cc933626SAndrew Rybchenko sfxge_tx_qcomplete(txq, evq); 1535e948693eSPhilip Paeps KASSERT(txq->completed == txq->added, 1536e948693eSPhilip Paeps ("txq->completed != txq->added")); 1537e948693eSPhilip Paeps 1538e948693eSPhilip Paeps sfxge_tx_qreap(txq); 1539e948693eSPhilip Paeps KASSERT(txq->reaped == txq->completed, 1540e948693eSPhilip Paeps ("txq->reaped != txq->completed")); 1541e948693eSPhilip Paeps 1542e948693eSPhilip Paeps txq->added = 0; 1543e948693eSPhilip Paeps txq->pending = 0; 1544e948693eSPhilip Paeps txq->completed = 0; 1545e948693eSPhilip Paeps txq->reaped = 0; 1546e948693eSPhilip Paeps 1547e948693eSPhilip Paeps /* Destroy the common code transmit queue. */ 1548e948693eSPhilip Paeps efx_tx_qdestroy(txq->common); 1549e948693eSPhilip Paeps txq->common = NULL; 1550e948693eSPhilip Paeps 1551e948693eSPhilip Paeps efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1552385b1d8eSGeorge V. Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries)); 1553e948693eSPhilip Paeps 15548b447157SAndrew Rybchenko txq->hw_cksum_flags = 0; 15558b447157SAndrew Rybchenko 1556763cab71SAndrew Rybchenko SFXGE_EVQ_UNLOCK(evq); 1557763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1558e948693eSPhilip Paeps } 1559e948693eSPhilip Paeps 1560a45a0da1SAndrew Rybchenko /* 1561a45a0da1SAndrew Rybchenko * Estimate maximum number of Tx descriptors required for TSO packet. 1562a45a0da1SAndrew Rybchenko * With minimum MSS and maximum mbuf length we might need more (even 1563a45a0da1SAndrew Rybchenko * than a ring-ful of descriptors), but this should not happen in 1564a45a0da1SAndrew Rybchenko * practice except due to deliberate attack. In that case we will 1565a45a0da1SAndrew Rybchenko * truncate the output at a packet boundary. 1566a45a0da1SAndrew Rybchenko */ 1567a45a0da1SAndrew Rybchenko static unsigned int 1568a45a0da1SAndrew Rybchenko sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, 1569a45a0da1SAndrew Rybchenko unsigned int tso_fw_assisted) 1570a45a0da1SAndrew Rybchenko { 1571a45a0da1SAndrew Rybchenko /* One descriptor for every input fragment */ 1572a45a0da1SAndrew Rybchenko unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; 1573a45a0da1SAndrew Rybchenko unsigned int sw_tso_max_descs; 1574a45a0da1SAndrew Rybchenko unsigned int fa_tso_v1_max_descs = 0; 1575a45a0da1SAndrew Rybchenko unsigned int fa_tso_v2_max_descs = 0; 1576a45a0da1SAndrew Rybchenko 15778b447157SAndrew Rybchenko /* Checksum offload Tx option descriptor may be required */ 15788b447157SAndrew Rybchenko if (sc->txq_dynamic_cksum_toggle_supported) 15798b447157SAndrew Rybchenko max_descs++; 15808b447157SAndrew Rybchenko 1581a45a0da1SAndrew Rybchenko /* VLAN tagging Tx option descriptor may be required */ 1582a45a0da1SAndrew Rybchenko if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) 1583a45a0da1SAndrew Rybchenko max_descs++; 1584a45a0da1SAndrew Rybchenko 1585a45a0da1SAndrew Rybchenko if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { 1586a45a0da1SAndrew Rybchenko /* 1587a45a0da1SAndrew Rybchenko * Plus header and payload descriptor for each output segment. 1588a45a0da1SAndrew Rybchenko * Minus one since header fragment is already counted. 1589a45a0da1SAndrew Rybchenko * Even if FATSO is used, we should be ready to fallback 1590a45a0da1SAndrew Rybchenko * to do it in the driver. 1591a45a0da1SAndrew Rybchenko */ 1592a45a0da1SAndrew Rybchenko sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; 1593a45a0da1SAndrew Rybchenko 1594a45a0da1SAndrew Rybchenko /* FW assisted TSOv1 requires one more descriptor per segment 1595a45a0da1SAndrew Rybchenko * in comparison to SW TSO */ 1596a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV1) 1597a45a0da1SAndrew Rybchenko fa_tso_v1_max_descs = 1598a45a0da1SAndrew Rybchenko sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; 1599a45a0da1SAndrew Rybchenko 1600a45a0da1SAndrew Rybchenko /* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra 1601a45a0da1SAndrew Rybchenko * descriptors per superframe limited by number of DMA fetches 1602a45a0da1SAndrew Rybchenko * per packet. The first packet header is already counted. 1603a45a0da1SAndrew Rybchenko */ 1604a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV2) { 1605a45a0da1SAndrew Rybchenko fa_tso_v2_max_descs = 1606a45a0da1SAndrew Rybchenko howmany(SFXGE_TX_MAPPING_MAX_SEG, 1607a45a0da1SAndrew Rybchenko EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * 1608a45a0da1SAndrew Rybchenko (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; 1609a45a0da1SAndrew Rybchenko } 1610a45a0da1SAndrew Rybchenko 1611a45a0da1SAndrew Rybchenko max_descs += MAX(sw_tso_max_descs, 1612a45a0da1SAndrew Rybchenko MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); 1613a45a0da1SAndrew Rybchenko } 1614a45a0da1SAndrew Rybchenko 1615a45a0da1SAndrew Rybchenko return (max_descs); 1616a45a0da1SAndrew Rybchenko } 1617a45a0da1SAndrew Rybchenko 1618e948693eSPhilip Paeps static int 1619e948693eSPhilip Paeps sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) 1620e948693eSPhilip Paeps { 1621e948693eSPhilip Paeps struct sfxge_txq *txq; 1622e948693eSPhilip Paeps efsys_mem_t *esmp; 1623e948693eSPhilip Paeps uint16_t flags; 1624a45a0da1SAndrew Rybchenko unsigned int tso_fw_assisted; 16258b447157SAndrew Rybchenko unsigned int label; 1626e948693eSPhilip Paeps struct sfxge_evq *evq; 16273c838a9fSAndrew Rybchenko unsigned int desc_index; 1628e948693eSPhilip Paeps int rc; 1629e948693eSPhilip Paeps 16303c838a9fSAndrew Rybchenko SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 16313c838a9fSAndrew Rybchenko 1632e948693eSPhilip Paeps txq = sc->txq[index]; 1633e948693eSPhilip Paeps esmp = &txq->mem; 1634e948693eSPhilip Paeps evq = sc->evq[txq->evq_index]; 1635e948693eSPhilip Paeps 1636e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1637e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1638e948693eSPhilip Paeps KASSERT(evq->init_state == SFXGE_EVQ_STARTED, 1639e948693eSPhilip Paeps ("evq->init_state != SFXGE_EVQ_STARTED")); 1640e948693eSPhilip Paeps 1641e948693eSPhilip Paeps /* Program the buffer table. */ 1642e948693eSPhilip Paeps if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, 1643385b1d8eSGeorge V. Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries))) != 0) 1644385b1d8eSGeorge V. Neville-Neil return (rc); 1645e948693eSPhilip Paeps 1646e948693eSPhilip Paeps /* Determine the kind of queue we are creating. */ 1647a45a0da1SAndrew Rybchenko tso_fw_assisted = 0; 1648e948693eSPhilip Paeps switch (txq->type) { 1649e948693eSPhilip Paeps case SFXGE_TXQ_NON_CKSUM: 1650e948693eSPhilip Paeps flags = 0; 1651e948693eSPhilip Paeps break; 1652e948693eSPhilip Paeps case SFXGE_TXQ_IP_CKSUM: 16539dd0e15fSAndrew Rybchenko flags = EFX_TXQ_CKSUM_IPV4; 1654e948693eSPhilip Paeps break; 1655e948693eSPhilip Paeps case SFXGE_TXQ_IP_TCP_UDP_CKSUM: 16569dd0e15fSAndrew Rybchenko flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 1657a45a0da1SAndrew Rybchenko tso_fw_assisted = sc->tso_fw_assisted; 1658a45a0da1SAndrew Rybchenko if (tso_fw_assisted & SFXGE_FATSOV2) 1659a45a0da1SAndrew Rybchenko flags |= EFX_TXQ_FATSOV2; 1660e948693eSPhilip Paeps break; 1661e948693eSPhilip Paeps default: 1662e948693eSPhilip Paeps KASSERT(0, ("Impossible TX queue")); 1663e948693eSPhilip Paeps flags = 0; 1664e948693eSPhilip Paeps break; 1665e948693eSPhilip Paeps } 1666e948693eSPhilip Paeps 16678b447157SAndrew Rybchenko label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type; 16688b447157SAndrew Rybchenko 1669e948693eSPhilip Paeps /* Create the common code transmit queue. */ 16708b447157SAndrew Rybchenko if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1671385b1d8eSGeorge V. Neville-Neil sc->txq_entries, txq->buf_base_id, flags, evq->common, 1672a45a0da1SAndrew Rybchenko &txq->common, &desc_index)) != 0) { 1673a45a0da1SAndrew Rybchenko /* Retry if no FATSOv2 resources, otherwise fail */ 1674a45a0da1SAndrew Rybchenko if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) 1675a45a0da1SAndrew Rybchenko goto fail; 1676a45a0da1SAndrew Rybchenko 1677a45a0da1SAndrew Rybchenko /* Looks like all FATSOv2 contexts are used */ 1678a45a0da1SAndrew Rybchenko flags &= ~EFX_TXQ_FATSOV2; 1679a45a0da1SAndrew Rybchenko tso_fw_assisted &= ~SFXGE_FATSOV2; 16808b447157SAndrew Rybchenko if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1681a45a0da1SAndrew Rybchenko sc->txq_entries, txq->buf_base_id, flags, evq->common, 16823c838a9fSAndrew Rybchenko &txq->common, &desc_index)) != 0) 1683e948693eSPhilip Paeps goto fail; 1684a45a0da1SAndrew Rybchenko } 1685e948693eSPhilip Paeps 16863c838a9fSAndrew Rybchenko /* Initialise queue descriptor indexes */ 16873c838a9fSAndrew Rybchenko txq->added = txq->pending = txq->completed = txq->reaped = desc_index; 16883c838a9fSAndrew Rybchenko 1689763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK(txq); 1690e948693eSPhilip Paeps 1691e948693eSPhilip Paeps /* Enable the transmit queue. */ 1692e948693eSPhilip Paeps efx_tx_qenable(txq->common); 1693e948693eSPhilip Paeps 1694e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_STARTED; 16953c838a9fSAndrew Rybchenko txq->flush_state = SFXGE_FLUSH_REQUIRED; 1696a45a0da1SAndrew Rybchenko txq->tso_fw_assisted = tso_fw_assisted; 1697a45a0da1SAndrew Rybchenko 1698a45a0da1SAndrew Rybchenko txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, 1699a45a0da1SAndrew Rybchenko tso_fw_assisted); 1700e948693eSPhilip Paeps 17011baf03a4SAndrew Rybchenko txq->hw_vlan_tci = 0; 17021baf03a4SAndrew Rybchenko 17038b447157SAndrew Rybchenko txq->hw_cksum_flags = flags & 17048b447157SAndrew Rybchenko (EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP); 17058b447157SAndrew Rybchenko 1706763cab71SAndrew Rybchenko SFXGE_TXQ_UNLOCK(txq); 1707e948693eSPhilip Paeps 1708e948693eSPhilip Paeps return (0); 1709e948693eSPhilip Paeps 1710e948693eSPhilip Paeps fail: 1711e948693eSPhilip Paeps efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1712385b1d8eSGeorge V. Neville-Neil EFX_TXQ_NBUFS(sc->txq_entries)); 1713385b1d8eSGeorge V. Neville-Neil return (rc); 1714e948693eSPhilip Paeps } 1715e948693eSPhilip Paeps 1716e948693eSPhilip Paeps void 1717e948693eSPhilip Paeps sfxge_tx_stop(struct sfxge_softc *sc) 1718e948693eSPhilip Paeps { 1719e948693eSPhilip Paeps int index; 1720e948693eSPhilip Paeps 1721e2b05fe2SAndrew Rybchenko index = sc->txq_count; 1722e948693eSPhilip Paeps while (--index >= 0) 1723e2b05fe2SAndrew Rybchenko sfxge_tx_qstop(sc, index); 1724e948693eSPhilip Paeps 1725e948693eSPhilip Paeps /* Tear down the transmit module */ 1726e948693eSPhilip Paeps efx_tx_fini(sc->enp); 1727e948693eSPhilip Paeps } 1728e948693eSPhilip Paeps 1729e948693eSPhilip Paeps int 1730e948693eSPhilip Paeps sfxge_tx_start(struct sfxge_softc *sc) 1731e948693eSPhilip Paeps { 1732e948693eSPhilip Paeps int index; 1733e948693eSPhilip Paeps int rc; 1734e948693eSPhilip Paeps 1735e948693eSPhilip Paeps /* Initialize the common code transmit module. */ 1736e948693eSPhilip Paeps if ((rc = efx_tx_init(sc->enp)) != 0) 1737e948693eSPhilip Paeps return (rc); 1738e948693eSPhilip Paeps 1739e2b05fe2SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) { 1740e2b05fe2SAndrew Rybchenko if ((rc = sfxge_tx_qstart(sc, index)) != 0) 1741e948693eSPhilip Paeps goto fail; 1742e948693eSPhilip Paeps } 1743e948693eSPhilip Paeps 1744e948693eSPhilip Paeps return (0); 1745e948693eSPhilip Paeps 1746e948693eSPhilip Paeps fail: 1747e2b05fe2SAndrew Rybchenko while (--index >= 0) 1748e2b05fe2SAndrew Rybchenko sfxge_tx_qstop(sc, index); 1749e2b05fe2SAndrew Rybchenko 1750e948693eSPhilip Paeps efx_tx_fini(sc->enp); 1751e948693eSPhilip Paeps 1752e948693eSPhilip Paeps return (rc); 1753e948693eSPhilip Paeps } 1754e948693eSPhilip Paeps 1755f6222d7bSAndrew Rybchenko static int 1756f6222d7bSAndrew Rybchenko sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) 1757f6222d7bSAndrew Rybchenko { 1758f6222d7bSAndrew Rybchenko struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); 1759f6222d7bSAndrew Rybchenko struct sysctl_oid *stat_node; 1760f6222d7bSAndrew Rybchenko unsigned int id; 1761f6222d7bSAndrew Rybchenko 1762f6222d7bSAndrew Rybchenko stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1763f6222d7bSAndrew Rybchenko "stats", CTLFLAG_RD, NULL, 1764f6222d7bSAndrew Rybchenko "Tx queue statistics"); 1765f6222d7bSAndrew Rybchenko if (stat_node == NULL) 1766f6222d7bSAndrew Rybchenko return (ENOMEM); 1767f6222d7bSAndrew Rybchenko 1768f6222d7bSAndrew Rybchenko for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1769f6222d7bSAndrew Rybchenko SYSCTL_ADD_ULONG( 1770f6222d7bSAndrew Rybchenko ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, 1771f6222d7bSAndrew Rybchenko sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, 1772f6222d7bSAndrew Rybchenko (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), 1773f6222d7bSAndrew Rybchenko ""); 1774f6222d7bSAndrew Rybchenko } 1775f6222d7bSAndrew Rybchenko 1776f6222d7bSAndrew Rybchenko return (0); 1777f6222d7bSAndrew Rybchenko } 1778f6222d7bSAndrew Rybchenko 1779e948693eSPhilip Paeps /** 1780e948693eSPhilip Paeps * Destroy a transmit queue. 1781e948693eSPhilip Paeps */ 1782e948693eSPhilip Paeps static void 1783e948693eSPhilip Paeps sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) 1784e948693eSPhilip Paeps { 1785e948693eSPhilip Paeps struct sfxge_txq *txq; 1786385b1d8eSGeorge V. Neville-Neil unsigned int nmaps; 1787e948693eSPhilip Paeps 1788e948693eSPhilip Paeps txq = sc->txq[index]; 1789e948693eSPhilip Paeps 1790e948693eSPhilip Paeps KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1791e948693eSPhilip Paeps ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1792e948693eSPhilip Paeps 1793e948693eSPhilip Paeps if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) 1794e948693eSPhilip Paeps tso_fini(txq); 1795e948693eSPhilip Paeps 1796e948693eSPhilip Paeps /* Free the context arrays. */ 1797e948693eSPhilip Paeps free(txq->pend_desc, M_SFXGE); 1798385b1d8eSGeorge V. Neville-Neil nmaps = sc->txq_entries; 1799b7b0edd1SGeorge V. Neville-Neil while (nmaps-- != 0) 1800e948693eSPhilip Paeps bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1801e948693eSPhilip Paeps free(txq->stmp, M_SFXGE); 1802e948693eSPhilip Paeps 1803e948693eSPhilip Paeps /* Release DMA memory mapping. */ 1804e948693eSPhilip Paeps sfxge_dma_free(&txq->mem); 1805e948693eSPhilip Paeps 1806e948693eSPhilip Paeps sc->txq[index] = NULL; 1807e948693eSPhilip Paeps 1808763cab71SAndrew Rybchenko SFXGE_TXQ_LOCK_DESTROY(txq); 1809e948693eSPhilip Paeps 1810e948693eSPhilip Paeps free(txq, M_SFXGE); 1811e948693eSPhilip Paeps } 1812e948693eSPhilip Paeps 1813e948693eSPhilip Paeps static int 1814e948693eSPhilip Paeps sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, 1815e948693eSPhilip Paeps enum sfxge_txq_type type, unsigned int evq_index) 1816e948693eSPhilip Paeps { 18176a09b206SAndrew Rybchenko const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 1818bc85c897SGeorge V. Neville-Neil char name[16]; 181995caaf0fSAndrew Rybchenko struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1820bc85c897SGeorge V. Neville-Neil struct sysctl_oid *txq_node; 1821e948693eSPhilip Paeps struct sfxge_txq *txq; 1822e948693eSPhilip Paeps struct sfxge_evq *evq; 1823e948693eSPhilip Paeps struct sfxge_tx_dpl *stdp; 182495caaf0fSAndrew Rybchenko struct sysctl_oid *dpl_node; 1825e948693eSPhilip Paeps efsys_mem_t *esmp; 1826e948693eSPhilip Paeps unsigned int nmaps; 1827e948693eSPhilip Paeps int rc; 1828e948693eSPhilip Paeps 1829e948693eSPhilip Paeps txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); 1830e948693eSPhilip Paeps txq->sc = sc; 1831385b1d8eSGeorge V. Neville-Neil txq->entries = sc->txq_entries; 1832385b1d8eSGeorge V. Neville-Neil txq->ptr_mask = txq->entries - 1; 1833e948693eSPhilip Paeps 1834e948693eSPhilip Paeps sc->txq[txq_index] = txq; 1835e948693eSPhilip Paeps esmp = &txq->mem; 1836e948693eSPhilip Paeps 1837e948693eSPhilip Paeps evq = sc->evq[evq_index]; 1838e948693eSPhilip Paeps 1839e948693eSPhilip Paeps /* Allocate and zero DMA space for the descriptor ring. */ 1840385b1d8eSGeorge V. Neville-Neil if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) 1841e948693eSPhilip Paeps return (rc); 1842e948693eSPhilip Paeps 1843e948693eSPhilip Paeps /* Allocate buffer table entries. */ 1844385b1d8eSGeorge V. Neville-Neil sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), 1845e948693eSPhilip Paeps &txq->buf_base_id); 1846e948693eSPhilip Paeps 1847e948693eSPhilip Paeps /* Create a DMA tag for packet mappings. */ 18486a09b206SAndrew Rybchenko if (bus_dma_tag_create(sc->parent_dma_tag, 1, 18496a09b206SAndrew Rybchenko encp->enc_tx_dma_desc_boundary, 1850fb8ccc78SMarius Strobl MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, 18516a09b206SAndrew Rybchenko NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 18526a09b206SAndrew Rybchenko encp->enc_tx_dma_desc_size_max, 0, NULL, NULL, 1853e948693eSPhilip Paeps &txq->packet_dma_tag) != 0) { 1854e948693eSPhilip Paeps device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); 1855e948693eSPhilip Paeps rc = ENOMEM; 1856e948693eSPhilip Paeps goto fail; 1857e948693eSPhilip Paeps } 1858e948693eSPhilip Paeps 1859e948693eSPhilip Paeps /* Allocate pending descriptor array for batching writes. */ 18603c838a9fSAndrew Rybchenko txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, 1861e948693eSPhilip Paeps M_SFXGE, M_ZERO | M_WAITOK); 1862e948693eSPhilip Paeps 1863e948693eSPhilip Paeps /* Allocate and initialise mbuf DMA mapping array. */ 1864385b1d8eSGeorge V. Neville-Neil txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, 1865e948693eSPhilip Paeps M_SFXGE, M_ZERO | M_WAITOK); 1866385b1d8eSGeorge V. Neville-Neil for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { 1867e948693eSPhilip Paeps rc = bus_dmamap_create(txq->packet_dma_tag, 0, 1868e948693eSPhilip Paeps &txq->stmp[nmaps].map); 1869e948693eSPhilip Paeps if (rc != 0) 1870e948693eSPhilip Paeps goto fail2; 1871e948693eSPhilip Paeps } 1872e948693eSPhilip Paeps 1873bc85c897SGeorge V. Neville-Neil snprintf(name, sizeof(name), "%u", txq_index); 187495caaf0fSAndrew Rybchenko txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), 1875bc85c897SGeorge V. Neville-Neil OID_AUTO, name, CTLFLAG_RD, NULL, ""); 1876bc85c897SGeorge V. Neville-Neil if (txq_node == NULL) { 1877bc85c897SGeorge V. Neville-Neil rc = ENOMEM; 1878bc85c897SGeorge V. Neville-Neil goto fail_txq_node; 1879bc85c897SGeorge V. Neville-Neil } 1880bc85c897SGeorge V. Neville-Neil 1881e948693eSPhilip Paeps if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && 1882e948693eSPhilip Paeps (rc = tso_init(txq)) != 0) 1883e948693eSPhilip Paeps goto fail3; 1884e948693eSPhilip Paeps 1885e948693eSPhilip Paeps /* Initialize the deferred packet list. */ 1886e948693eSPhilip Paeps stdp = &txq->dpl; 1887060a95efSGeorge V. Neville-Neil stdp->std_put_max = sfxge_tx_dpl_put_max; 1888060a95efSGeorge V. Neville-Neil stdp->std_get_max = sfxge_tx_dpl_get_max; 188993929f25SAndrew Rybchenko stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; 1890e948693eSPhilip Paeps stdp->std_getp = &stdp->std_get; 1891e948693eSPhilip Paeps 189233d45dc5SAndrew Rybchenko SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); 1893bc85c897SGeorge V. Neville-Neil 189495caaf0fSAndrew Rybchenko dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 189595caaf0fSAndrew Rybchenko "dpl", CTLFLAG_RD, NULL, 189695caaf0fSAndrew Rybchenko "Deferred packet list statistics"); 189795caaf0fSAndrew Rybchenko if (dpl_node == NULL) { 189895caaf0fSAndrew Rybchenko rc = ENOMEM; 189995caaf0fSAndrew Rybchenko goto fail_dpl_node; 190095caaf0fSAndrew Rybchenko } 190195caaf0fSAndrew Rybchenko 190295caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 190395caaf0fSAndrew Rybchenko "get_count", CTLFLAG_RD | CTLFLAG_STATS, 1904bc85c897SGeorge V. Neville-Neil &stdp->std_get_count, 0, ""); 190595caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 190695caaf0fSAndrew Rybchenko "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, 190793929f25SAndrew Rybchenko &stdp->std_get_non_tcp_count, 0, ""); 190895caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 190995caaf0fSAndrew Rybchenko "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 191093929f25SAndrew Rybchenko &stdp->std_get_hiwat, 0, ""); 191195caaf0fSAndrew Rybchenko SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 191295caaf0fSAndrew Rybchenko "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1913bce6d281SAndrew Rybchenko &stdp->std_put_hiwat, 0, ""); 1914e948693eSPhilip Paeps 1915f6222d7bSAndrew Rybchenko rc = sfxge_txq_stat_init(txq, txq_node); 1916f6222d7bSAndrew Rybchenko if (rc != 0) 1917f6222d7bSAndrew Rybchenko goto fail_txq_stat_init; 1918f6222d7bSAndrew Rybchenko 1919e948693eSPhilip Paeps txq->type = type; 1920e948693eSPhilip Paeps txq->evq_index = evq_index; 1921e948693eSPhilip Paeps txq->init_state = SFXGE_TXQ_INITIALIZED; 19223c838a9fSAndrew Rybchenko 1923e948693eSPhilip Paeps return (0); 1924e948693eSPhilip Paeps 1925f6222d7bSAndrew Rybchenko fail_txq_stat_init: 192695caaf0fSAndrew Rybchenko fail_dpl_node: 1927e948693eSPhilip Paeps fail3: 1928bc85c897SGeorge V. Neville-Neil fail_txq_node: 1929e948693eSPhilip Paeps free(txq->pend_desc, M_SFXGE); 1930e948693eSPhilip Paeps fail2: 1931b7b0edd1SGeorge V. Neville-Neil while (nmaps-- != 0) 1932e948693eSPhilip Paeps bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1933e948693eSPhilip Paeps free(txq->stmp, M_SFXGE); 1934e948693eSPhilip Paeps bus_dma_tag_destroy(txq->packet_dma_tag); 1935e948693eSPhilip Paeps 1936e948693eSPhilip Paeps fail: 1937e948693eSPhilip Paeps sfxge_dma_free(esmp); 1938e948693eSPhilip Paeps 1939e948693eSPhilip Paeps return (rc); 1940e948693eSPhilip Paeps } 1941e948693eSPhilip Paeps 1942e948693eSPhilip Paeps static int 1943e948693eSPhilip Paeps sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) 1944e948693eSPhilip Paeps { 1945e948693eSPhilip Paeps struct sfxge_softc *sc = arg1; 1946e948693eSPhilip Paeps unsigned int id = arg2; 1947e948693eSPhilip Paeps unsigned long sum; 1948e948693eSPhilip Paeps unsigned int index; 1949e948693eSPhilip Paeps 1950e948693eSPhilip Paeps /* Sum across all TX queues */ 1951e948693eSPhilip Paeps sum = 0; 1952e2b05fe2SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) 1953e948693eSPhilip Paeps sum += *(unsigned long *)((caddr_t)sc->txq[index] + 1954e948693eSPhilip Paeps sfxge_tx_stats[id].offset); 1955e948693eSPhilip Paeps 1956b7b0edd1SGeorge V. Neville-Neil return (SYSCTL_OUT(req, &sum, sizeof(sum))); 1957e948693eSPhilip Paeps } 1958e948693eSPhilip Paeps 1959e948693eSPhilip Paeps static void 1960e948693eSPhilip Paeps sfxge_tx_stat_init(struct sfxge_softc *sc) 1961e948693eSPhilip Paeps { 1962e948693eSPhilip Paeps struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1963e948693eSPhilip Paeps struct sysctl_oid_list *stat_list; 1964e948693eSPhilip Paeps unsigned int id; 1965e948693eSPhilip Paeps 1966e948693eSPhilip Paeps stat_list = SYSCTL_CHILDREN(sc->stats_node); 1967e948693eSPhilip Paeps 1968612d8e28SAndrew Rybchenko for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1969e948693eSPhilip Paeps SYSCTL_ADD_PROC( 1970e948693eSPhilip Paeps ctx, stat_list, 1971e948693eSPhilip Paeps OID_AUTO, sfxge_tx_stats[id].name, 1972e948693eSPhilip Paeps CTLTYPE_ULONG|CTLFLAG_RD, 1973e948693eSPhilip Paeps sc, id, sfxge_tx_stat_handler, "LU", 1974e948693eSPhilip Paeps ""); 1975e948693eSPhilip Paeps } 1976e948693eSPhilip Paeps } 1977e948693eSPhilip Paeps 19783d8fce27SAndrew Rybchenko uint64_t 19793d8fce27SAndrew Rybchenko sfxge_tx_get_drops(struct sfxge_softc *sc) 19803d8fce27SAndrew Rybchenko { 19813d8fce27SAndrew Rybchenko unsigned int index; 19823d8fce27SAndrew Rybchenko uint64_t drops = 0; 19833d8fce27SAndrew Rybchenko struct sfxge_txq *txq; 19843d8fce27SAndrew Rybchenko 19853d8fce27SAndrew Rybchenko /* Sum across all TX queues */ 19863d8fce27SAndrew Rybchenko for (index = 0; index < sc->txq_count; index++) { 19873d8fce27SAndrew Rybchenko txq = sc->txq[index]; 19883d8fce27SAndrew Rybchenko /* 19893d8fce27SAndrew Rybchenko * In theory, txq->put_overflow and txq->netdown_drops 19903d8fce27SAndrew Rybchenko * should use atomic operation and other should be 19913d8fce27SAndrew Rybchenko * obtained under txq lock, but it is just statistics. 19923d8fce27SAndrew Rybchenko */ 19933d8fce27SAndrew Rybchenko drops += txq->drops + txq->get_overflow + 19943d8fce27SAndrew Rybchenko txq->get_non_tcp_overflow + 19953d8fce27SAndrew Rybchenko txq->put_overflow + txq->netdown_drops + 19963d8fce27SAndrew Rybchenko txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; 19973d8fce27SAndrew Rybchenko } 19983d8fce27SAndrew Rybchenko return (drops); 19993d8fce27SAndrew Rybchenko } 20003d8fce27SAndrew Rybchenko 2001e948693eSPhilip Paeps void 2002e948693eSPhilip Paeps sfxge_tx_fini(struct sfxge_softc *sc) 2003e948693eSPhilip Paeps { 2004e948693eSPhilip Paeps int index; 2005e948693eSPhilip Paeps 2006e2b05fe2SAndrew Rybchenko index = sc->txq_count; 2007e948693eSPhilip Paeps while (--index >= 0) 2008e2b05fe2SAndrew Rybchenko sfxge_tx_qfini(sc, index); 2009e948693eSPhilip Paeps 2010e2b05fe2SAndrew Rybchenko sc->txq_count = 0; 2011e948693eSPhilip Paeps } 2012e948693eSPhilip Paeps 2013e948693eSPhilip Paeps 2014e948693eSPhilip Paeps int 2015e948693eSPhilip Paeps sfxge_tx_init(struct sfxge_softc *sc) 2016e948693eSPhilip Paeps { 20173c838a9fSAndrew Rybchenko const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 2018e948693eSPhilip Paeps struct sfxge_intr *intr; 2019e948693eSPhilip Paeps int index; 2020e948693eSPhilip Paeps int rc; 2021e948693eSPhilip Paeps 2022e948693eSPhilip Paeps intr = &sc->intr; 2023e948693eSPhilip Paeps 2024e948693eSPhilip Paeps KASSERT(intr->state == SFXGE_INTR_INITIALIZED, 2025e948693eSPhilip Paeps ("intr->state != SFXGE_INTR_INITIALIZED")); 2026e948693eSPhilip Paeps 20273bce7d0fSAndrew Rybchenko if (sfxge_tx_dpl_get_max <= 0) { 20283bce7d0fSAndrew Rybchenko log(LOG_ERR, "%s=%d must be greater than 0", 20293bce7d0fSAndrew Rybchenko SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); 20303bce7d0fSAndrew Rybchenko rc = EINVAL; 20313bce7d0fSAndrew Rybchenko goto fail_tx_dpl_get_max; 20323bce7d0fSAndrew Rybchenko } 20333bce7d0fSAndrew Rybchenko if (sfxge_tx_dpl_get_non_tcp_max <= 0) { 20343bce7d0fSAndrew Rybchenko log(LOG_ERR, "%s=%d must be greater than 0", 20353bce7d0fSAndrew Rybchenko SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, 20363bce7d0fSAndrew Rybchenko sfxge_tx_dpl_get_non_tcp_max); 20373bce7d0fSAndrew Rybchenko rc = EINVAL; 20383bce7d0fSAndrew Rybchenko goto fail_tx_dpl_get_non_tcp_max; 20393bce7d0fSAndrew Rybchenko } 20403bce7d0fSAndrew Rybchenko if (sfxge_tx_dpl_put_max < 0) { 20413bce7d0fSAndrew Rybchenko log(LOG_ERR, "%s=%d must be greater or equal to 0", 20423bce7d0fSAndrew Rybchenko SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); 20433bce7d0fSAndrew Rybchenko rc = EINVAL; 20443bce7d0fSAndrew Rybchenko goto fail_tx_dpl_put_max; 20453bce7d0fSAndrew Rybchenko } 20463bce7d0fSAndrew Rybchenko 2047e4b0a127SAndrew Rybchenko sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc; 2048e2b05fe2SAndrew Rybchenko 20493c838a9fSAndrew Rybchenko sc->tso_fw_assisted = sfxge_tso_fw_assisted; 2050a45a0da1SAndrew Rybchenko if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || 2051a45a0da1SAndrew Rybchenko (!encp->enc_fw_assisted_tso_enabled)) 2052a45a0da1SAndrew Rybchenko sc->tso_fw_assisted &= ~SFXGE_FATSOV1; 2053a45a0da1SAndrew Rybchenko if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || 2054a45a0da1SAndrew Rybchenko (!encp->enc_fw_assisted_tso_v2_enabled)) 2055a45a0da1SAndrew Rybchenko sc->tso_fw_assisted &= ~SFXGE_FATSOV2; 20563c838a9fSAndrew Rybchenko 2057bc85c897SGeorge V. Neville-Neil sc->txqs_node = SYSCTL_ADD_NODE( 2058bc85c897SGeorge V. Neville-Neil device_get_sysctl_ctx(sc->dev), 2059bc85c897SGeorge V. Neville-Neil SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 2060bc85c897SGeorge V. Neville-Neil OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); 2061bc85c897SGeorge V. Neville-Neil if (sc->txqs_node == NULL) { 2062bc85c897SGeorge V. Neville-Neil rc = ENOMEM; 2063bc85c897SGeorge V. Neville-Neil goto fail_txq_node; 2064bc85c897SGeorge V. Neville-Neil } 2065bc85c897SGeorge V. Neville-Neil 2066e948693eSPhilip Paeps /* Initialize the transmit queues */ 20678b447157SAndrew Rybchenko if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) { 2068e948693eSPhilip Paeps if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, 2069e948693eSPhilip Paeps SFXGE_TXQ_NON_CKSUM, 0)) != 0) 2070e948693eSPhilip Paeps goto fail; 2071e948693eSPhilip Paeps 2072e948693eSPhilip Paeps if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, 2073e948693eSPhilip Paeps SFXGE_TXQ_IP_CKSUM, 0)) != 0) 2074e948693eSPhilip Paeps goto fail2; 20758b447157SAndrew Rybchenko } 2076e948693eSPhilip Paeps 2077e2b05fe2SAndrew Rybchenko for (index = 0; 2078e4b0a127SAndrew Rybchenko index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1; 2079e2b05fe2SAndrew Rybchenko index++) { 2080e4b0a127SAndrew Rybchenko if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index, 2081e948693eSPhilip Paeps SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) 2082e948693eSPhilip Paeps goto fail3; 2083e948693eSPhilip Paeps } 2084e948693eSPhilip Paeps 2085e948693eSPhilip Paeps sfxge_tx_stat_init(sc); 2086e948693eSPhilip Paeps 2087e948693eSPhilip Paeps return (0); 2088e948693eSPhilip Paeps 2089e948693eSPhilip Paeps fail3: 2090e948693eSPhilip Paeps while (--index >= 0) 2091e948693eSPhilip Paeps sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); 2092e948693eSPhilip Paeps 2093e2b05fe2SAndrew Rybchenko sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); 2094e2b05fe2SAndrew Rybchenko 2095e948693eSPhilip Paeps fail2: 2096e948693eSPhilip Paeps sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); 2097e948693eSPhilip Paeps 2098e948693eSPhilip Paeps fail: 2099bc85c897SGeorge V. Neville-Neil fail_txq_node: 2100e2b05fe2SAndrew Rybchenko sc->txq_count = 0; 21013bce7d0fSAndrew Rybchenko fail_tx_dpl_put_max: 21023bce7d0fSAndrew Rybchenko fail_tx_dpl_get_non_tcp_max: 21033bce7d0fSAndrew Rybchenko fail_tx_dpl_get_max: 2104e948693eSPhilip Paeps return (rc); 2105e948693eSPhilip Paeps } 2106